diff -Nru landscape-client-23.02/debian/changelog landscape-client-23.08/debian/changelog --- landscape-client-23.02/debian/changelog 2023-02-08 18:23:31.000000000 +0000 +++ landscape-client-23.08/debian/changelog 2023-08-17 18:41:21.000000000 +0000 @@ -1,3 +1,25 @@ +landscape-client (23.08-0ubuntu1) mantic; urgency=medium + + * New upstream release 23.08 + - Restructure landscape-config wizard (LP: #2031673) + - Add SnapMonitor, SnapManager, and related machinery (LP: #2031682) + - Limit message data directory size (LP: #2004591) + - Add zfs and btrfs to stable fs-lists (LP: #1499104) + - Test for broken control file (LP: #1813442) + - Config man page and help info fixes (LP: #1662222) + - Removed unnecessary select on the database (LP: #1994951) + - Last apt-key removal (LP: #1990435) + - Added a logging level check to provide a better error (LP: #2027521) + - Broker is able to bootstrap the landscape-client folder (LP: #1868730) + - Changed gpg file name to be less generic (LP: #2008432) + - Improve how dependency data is read from the deb packages (LP: #1813442) + - Fix error in swift plugin (LP: #2031674) + - Repository profiles do not overwrite all configurations (LP: #2031680) + - Remove old plugins (LP: #1989968) + - Send Ubuntu Pro reboot output (LP: #2031684) + + -- Kevin Nasto Thu, 17 Aug 2023 13:41:21 -0500 + landscape-client (23.02-0ubuntu1) lunar; urgency=medium * New upstream release 23.02: @@ -934,11 +956,11 @@ landscape-client (1.0.21.1-0ubuntu1) intrepid; urgency=low - * New upstream version: + * New upstream version: * Add ok-no-register option to landscape-config script to not fail if dbus isn't started or landscape-client isn't running. * lower timeout related to package management in landscape. - * debian/control: Depend on cron. + * debian/control: Depend on cron. * debian/landscape-client.postinst: use ok-no-register option so that the postinst script doesn't fail when run from the installer. (LP: #274573). @@ -986,7 +1008,7 @@ [ Mathias Gug ] * debian/landscape-common.postinst, debian/landscape-common.prerm: don't call update-motd init script as it's no longer available in the - update-motd package. Call directly /usr/sbin/update-motd instead. + update-motd package. Call directly /usr/sbin/update-motd instead. (LP: #271854) -- Mathias Gug Thu, 18 Sep 2008 16:47:08 -0400 @@ -1006,15 +1028,15 @@ command. A landscape account is not required to use this package. - landscape-client: has all the binaries required to run the landscape-client. Requires a landscape account. - - debian/control: + - debian/control: + move some dependencies to landscape-client so that - landscape-common doesn't install unecessary packages in the + landscape-common doesn't install unecessary packages in the default -server install. + move python-gobject to a Pre-Depends so that landscape-config can register the system during the postinst (LP: #268838). * debian/control: - depend on python-smartpm instead of smartpm-core. - * debian/landscape-client.postrm: delete /etc/landscape/client.conf when + * debian/landscape-client.postrm: delete /etc/landscape/client.conf when the package is purged. * debian/landscape-client.postinst: remove sysinfo_in_motd debconf question as it wasn't used.
@@ -1030,7 +1052,7 @@ landscape-client (1.0.18-0ubuntu1) intrepid; urgency=low - * New upstream release + * New upstream release -- Rick Clark Mon, 08 Sep 2008 16:35:57 -0500 @@ -1149,7 +1171,7 @@ landscape-client (1.0.10-hardy1-landscape1) hardy; urgency=low - * Change the utilisation of the csv module to be compatible with + * Change the utilisation of the csv module to be compatible with python 2.4 as used in Dapper. -- Andreas Hasenack Thu, 12 Jun 2008 17:58:05 +0000 @@ -1398,7 +1420,7 @@ fixed (#114829). -- Landscape Team Mon, 15 May 2007 16:31:00 -0700 - + landscape-client (0.10.2-1ubuntu1) feisty; urgency=low * New upstream release. @@ -1412,7 +1434,7 @@ * Minor fix in package management plugin timings. -- Landscape Team Thu, 10 May 2007 10:00:00 -0700 - + landscape-client (0.10.0-1ubuntu1) feisty; urgency=low * New upstream release. @@ -1422,9 +1444,9 @@ * The client uses the new operations system. Support for the now unused action info system is gone. * A minor bpickle bug was fixed. - + -- Jamshed Kakar Mon, 7 May 2007 20:16:00 -0700 - + landscape-client (0.9.6-1ubuntu1) feisty; urgency=low * New upstream release. @@ -1507,7 +1529,7 @@ * New upstream release. * Account registration log message no longer exposes account password. - + -- Jamshed Kakar Thu, 18 Jan 2007 23:07:00 -0800 landscape-client (0.8.0-1) unstable; urgency=low @@ -1518,7 +1540,7 @@ * Client includes an SSL certificate to verify the server with. * Package depends on python2.4-pycurl, which is necessary for the new SSL support. - + -- Jamshed Kakar Thu, 18 Jan 2007 10:48:00 -0800 landscape-client (0.7.0-1) unstable; urgency=low @@ -1529,7 +1551,7 @@ added to control log verbosity. * Package depends on perl-modules, which is necessary for bare-bones server installs. - + -- Jamshed Kakar Tue, 2 Jan 2007 12:54:00 -0800 landscape-client (0.6.1-1) unstable; urgency=low diff -Nru landscape-client-23.02/debian/copyright landscape-client-23.08/debian/copyright --- landscape-client-23.02/debian/copyright 2023-02-08 18:23:31.000000000 +0000 +++ landscape-client-23.08/debian/copyright 2023-08-17 18:41:21.000000000 +0000 @@ -33,4 +33,3 @@ On Debian systems, the complete text of the GNU General Public License can be found in `/usr/share/common-licenses/GPL-2'. - diff -Nru landscape-client-23.02/debian/landscape-client.postrm landscape-client-23.08/debian/landscape-client.postrm --- landscape-client-23.02/debian/landscape-client.postrm 2023-02-08 18:23:31.000000000 +0000 +++ landscape-client-23.08/debian/landscape-client.postrm 2023-08-17 18:41:21.000000000 +0000 @@ -29,7 +29,7 @@ rm -f "${LOG_DIR}/package-reporter.log"* rm -f "${LOG_DIR}/package-changer.log"* - rm -f "${GPG_DIR}/landscape-server"*.asc + rm -f "${GPG_DIR}/landscape-server-mirror"*.asc rm -rf "${DATA_DIR}/client" rm -rf "${DATA_DIR}/.gnupg" diff -Nru landscape-client-23.02/debian/README.source landscape-client-23.08/debian/README.source --- landscape-client-23.02/debian/README.source 2023-02-08 18:23:31.000000000 +0000 +++ landscape-client-23.08/debian/README.source 2023-08-17 18:41:21.000000000 +0000 @@ -12,4 +12,4 @@ landscape/__init__.py to include the entire new client version number. This helps us keep track of exact version of clients in use. There's no need to update the DEBIAN_REVISION variable, as it gets automatically set at build -time. +time. 
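The 23.08 changelog above lists "Limit message data directory size (LP: #2004591)"; the MessageStore hunks at the end of this debdiff implement it by capping the number of integer-named spool subdirectories (max_dirs, default 4) and the total store size in megabytes (max_size_mb, default 400). A minimal standalone sketch of the trimming rule follows; trim_spool is a hypothetical name, not part of the client's API, and it assumes subdirectories are named 0, 1, 2, ... as in the client's message store:

    import os
    import shutil

    def trim_spool(directory, max_dirs=4):
        # Sort numerically: names run 0, ..., 9, 10, so a lexicographic
        # sort would order 10 before 2.
        subdirs = sorted(os.listdir(directory), key=int)
        # Remove the oldest subdirectories beyond the allowed count,
        # ignoring errors so one stubborn directory cannot abort the sweep.
        for name in subdirs[:max(0, len(subdirs) - max_dirs)]:
            shutil.rmtree(os.path.join(directory, name), ignore_errors=True)

The real delete_messages_over_limit additionally re-checks the total store size afterwards and clears all messages if it still exceeds max_size_mb, since a backlog that large means the client has been unable to exchange with the server for a long time.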
diff -Nru landscape-client-23.02/debian/source.lintian-overrides landscape-client-23.08/debian/source.lintian-overrides --- landscape-client-23.02/debian/source.lintian-overrides 2023-02-08 18:23:31.000000000 +0000 +++ landscape-client-23.08/debian/source.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -# we use dh_python or dh_python2 depending on the ubuntu release -# the package is being built on, this is detected dynamically -# in the rules file -landscape-client source: dh_python-is-obsolete \ No newline at end of file diff -Nru landscape-client-23.02/debian/watch landscape-client-23.08/debian/watch --- landscape-client-23.02/debian/watch 2023-02-08 18:23:31.000000000 +0000 +++ landscape-client-23.08/debian/watch 2023-08-17 18:41:21.000000000 +0000 @@ -1,3 +1,3 @@ version=4 opts=filenamemangle=s/.+\/v?(\d\S+)\.tar\.gz/landscape-client-$1\.tar\.gz/ \ - https://github.com/CanonicalLtd/landscape-client/tags .*/?(\d{2}\.\d.\d)\.tar\.gz + https://github.com/CanonicalLtd/landscape-client/tags .*/?(\d{2}\.\d{2})\.tar\.gz diff -Nru landscape-client-23.02/dev/landscape-client-vm landscape-client-23.08/dev/landscape-client-vm --- landscape-client-23.02/dev/landscape-client-vm 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/dev/landscape-client-vm 2023-08-17 21:19:32.000000000 +0000 @@ -37,7 +37,7 @@ another account with the --account parameter). The built VM will be stored under ./build/intrepid along with some other -files. To launch the VM, cd to ./build/intrepid and issue: +files. To launch the VM, cd to ./build/intrepid and issue: $ ./run Once it's booted you can log into it with: $ ./ssh @@ -136,7 +136,7 @@ #!/bin/sh -e chown landscape /etc/landscape/client.conf chmod 600 /etc/landscape/client.conf -apt-key add /root/ppa-key +cp /root/ppa-key /etc/apt/trusted.gpg.d/landscape-server-mirror-root-ppa-key.asc echo "RUN=1" > /etc/default/landscape-client EOF chmod 755 script diff -Nru landscape-client-23.02/dev/upload-to-ppa landscape-client-23.08/dev/upload-to-ppa --- landscape-client-23.02/dev/upload-to-ppa 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/dev/upload-to-ppa 2023-08-17 21:19:32.000000000 +0000 @@ -112,7 +112,7 @@ else message="Built for $codename, no source changes" fi - cp debian/changelog ../ + cp debian/changelog ../ dch --force-distribution -b -v $version -D $codename -m $message dpkg-buildpackage -S $source_opt -k$key dput $ppa ../${package}_${version}_source.changes diff -Nru landscape-client-23.02/display_py2_testresults landscape-client-23.08/display_py2_testresults --- landscape-client-23.02/display_py2_testresults 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/display_py2_testresults 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,5 @@ #!/usr/bin/python -with open('_last_py2_res', 'r') as py2res: +with open("_last_py2_res", "r") as py2res: lines = py2res.readlines() lastline = lines[-1] @@ -7,7 +7,9 @@ time, total, total, err, fail, skip = lastline.split() if "".join((err, fail, skip)) != "000": - print("Python 2: \033[91mFAILED\033[0m (skips={}, failures={}, " - "errors={}, total={})".format(skip, fail, err, total)) + print( + "Python 2: \033[91mFAILED\033[0m (skips={}, failures={}, " + "errors={}, total={})".format(skip, fail, err, total), + ) else: - print("Python 2: \033[92mOK\033[0m (total={})".format(total)) + print(f"Python 2: \033[92mOK\033[0m (total={total})") diff -Nru landscape-client-23.02/example.conf landscape-client-23.08/example.conf --- landscape-client-23.02/example.conf 2023-02-07 
18:55:50.000000000 +0000 +++ landscape-client-23.08/example.conf 2023-08-17 21:19:32.000000000 +0000 @@ -77,7 +77,6 @@ # # ActiveProcessInfo - lists active processes # ComputerInfo - various information -# HardwareInventory - information provided by the "lshw" command # LoadAverage - load information # MemoryInfo - memory information # MountInfo - information about mount points (space available, used) @@ -86,9 +85,17 @@ # PackageMonitor - packages installed, available, versions # UserMonitor - users, groups # RebootRequired - whether a reboot is required or not +# AptPreferences - system APT preferences configuration # NetworkActivity - network information (TX, RX) # NetworkDevice - a list of connected network devices # UpdateManager - controls when distribution upgrades are prompted +# CPUUsage - CPU usage information +# SwiftUsage - Swift cluster usage +# CephUsage - Ceph usage information +# ComputerTags - changes in computer tags +# UbuntuProInfo - Ubuntu Pro registration information +# LivePatch - Livepatch status information +# UbuntuProRebootRequired - reports whether the system needs to be rebooted # # The special value "ALL" is an alias for the full list of plugins. monitor_plugins = ALL @@ -130,6 +137,9 @@ # The number of seconds between package monitor runs. package_monitor_interval = 1800 +# The number of seconds between snap monitor runs. +snap_monitor_interval = 1800 + # The URL of the http proxy to use, if any. # This value is optional. # @@ -182,3 +192,9 @@ # The default is 512kB # 2MB is allowed in this example #script_output_limit=2048 + +# Whether files in /etc/apt/sources.list.d are removed when a repository +# profile is added to this machine. +# +# The default is True +#manage_sources_list_d = True diff -Nru landscape-client-23.02/.flake8 landscape-client-23.08/.flake8 --- landscape-client-23.02/.flake8 1970-01-01 00:00:00.000000000 +0000 +++ landscape-client-23.08/.flake8 2023-08-17 21:19:32.000000000 +0000 @@ -0,0 +1,5 @@ +[flake8] +max-line-length = 79 +max-complexity = 18 +select = B,C,E,F,W,T4,B9 +ignore = E203, W503 diff -Nru landscape-client-23.02/.gitignore landscape-client-23.08/.gitignore --- landscape-client-23.02/.gitignore 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/.gitignore 2023-08-17 21:19:32.000000000 +0000 @@ -25,3 +25,9 @@ _last_py2_res *.pyc .cache +_trial_temp* +*.snap +landscape-client_amd64*.txt +landscape-client_arm64*.txt +landscape-client_ppc64el*.txt +landscape-client_s390x*.txt diff -Nru landscape-client-23.02/landscape/client/accumulate.py landscape-client-23.08/landscape/client/accumulate.py --- landscape-client-23.02/landscape/client/accumulate.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/accumulate.py 2023-08-17 21:19:32.000000000 +0000 @@ -72,24 +72,31 @@ """ -class Accumulator(object): - +class Accumulator: def __init__(self, persist, step_size): self._persist = persist self._step_size = step_size def __call__(self, new_timestamp, new_free_space, key): previous_timestamp, accumulated_value = self._persist.get(key, (0, 0)) - accumulated_value, step_data = \ accumulate(previous_timestamp, accumulated_value, - new_timestamp, new_free_space, self._step_size) + accumulated_value, step_data = accumulate( + previous_timestamp, + accumulated_value, + new_timestamp, + new_free_space, + self._step_size, + ) self._persist.set(key, (new_timestamp, accumulated_value)) return step_data -def accumulate(previous_timestamp, accumulated_value, - new_timestamp, new_value, - step_size): +def
accumulate( + previous_timestamp, + accumulated_value, + new_timestamp, + new_value, + step_size, +): previous_step = previous_timestamp // step_size new_step = new_timestamp // step_size step_boundary = new_step * step_size diff -Nru landscape-client-23.02/landscape/client/amp.py landscape-client-23.08/landscape/client/amp.py --- landscape-client-23.02/landscape/client/amp.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/amp.py 2023-08-17 21:19:32.000000000 +0000 @@ -10,14 +10,15 @@ let the various services connect to each other in an easy and idiomatic way, and have them respond to standard requests like "ping" or "exit". """ -import os import logging +import os -from landscape.lib.amp import ( - MethodCallClientFactory, MethodCallServerFactory, RemoteObject) +from landscape.lib.amp import MethodCallClientFactory +from landscape.lib.amp import MethodCallServerFactory +from landscape.lib.amp import RemoteObject -class ComponentPublisher(object): +class ComponentPublisher: """Publish a Landscape client component using a UNIX socket. Other Landscape client processes can then connect to the socket and invoke @@ -72,7 +73,7 @@ return method -class ComponentConnector(object): +class ComponentConnector: """Utility superclass for creating connections with a Landscape component. @cvar component: The class of the component to connect to, it is expected @@ -90,6 +91,7 @@ @see: L{MethodCallClientFactory}. """ + factory = MethodCallClientFactory component = None # Must be defined by sub-classes remote = RemoteObject @@ -123,14 +125,14 @@ factory.factor = factor def fire_reconnect(ignored): - self._reactor.fire("%s-reconnect" % self.component.name) + self._reactor.fire(f"{self.component.name}-reconnect") def connected(remote): factory.notifyOnConnect(fire_reconnect) return remote def log_error(failure): - logging.error("Error while connecting to %s", self.component.name) + logging.error(f"Error while connecting to {self.component.name}") return failure socket_path = _get_socket_path(self.component, self._config) diff -Nru landscape-client-23.02/landscape/client/broker/amp.py landscape-client-23.08/landscape/client/broker/amp.py --- landscape-client-23.02/landscape/client/broker/amp.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/amp.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,16 +1,19 @@ -from twisted.internet.defer import maybeDeferred, execute, succeed +from twisted.internet.defer import execute +from twisted.internet.defer import maybeDeferred +from twisted.internet.defer import succeed from twisted.python.compat import iteritems -from landscape.lib.amp import RemoteObject, MethodCallArgument -from landscape.client.amp import ComponentConnector, get_remote_methods -from landscape.client.broker.server import BrokerServer +from landscape.client.amp import ComponentConnector +from landscape.client.amp import get_remote_methods from landscape.client.broker.client import BrokerClient -from landscape.client.monitor.monitor import Monitor +from landscape.client.broker.server import BrokerServer from landscape.client.manager.manager import Manager +from landscape.client.monitor.monitor import Monitor +from landscape.lib.amp import MethodCallArgument +from landscape.lib.amp import RemoteObject class RemoteBroker(RemoteObject): - def call_if_accepted(self, type, callable, *args): """Call C{callable} if C{type} is an accepted message type.""" deferred_types = self.get_accepted_message_types() @@ -18,6 +21,7 @@ def 
got_accepted_types(result): if type in result: return callable(*args) + deferred_types.addCallback(got_accepted_types) return deferred_types @@ -30,11 +34,10 @@ callable will be fired. """ result = self.listen_events(list(handlers.keys())) - return result.addCallback( - lambda args: handlers[args[0]](**args[1])) + return result.addCallback(lambda args: handlers[args[0]](**args[1])) -class FakeRemoteBroker(object): +class FakeRemoteBroker: """Looks like L{RemoteBroker}, but actually talks to local objects.""" def __init__(self, exchanger, message_store, broker_server): @@ -48,16 +51,19 @@ that they're encodable with AMP. """ original = getattr(self.broker_server, name, None) - if (name in get_remote_methods(self.broker_server) and - original is not None and - callable(original) - ): + if ( + name in get_remote_methods(self.broker_server) + and original is not None + and callable(original) + ): + def method(*args, **kwargs): for arg in args: assert MethodCallArgument.check(arg) for k, v in iteritems(kwargs): assert MethodCallArgument.check(v) return execute(original, *args, **kwargs) + return method else: raise AttributeError(name) @@ -76,8 +82,7 @@ callable will be fired. """ result = self.broker_server.listen_events(handlers.keys()) - return result.addCallback( - lambda args: handlers[args[0]](**args[1])) + return result.addCallback(lambda args: handlers[args[0]](**args[1])) def register(self): return succeed(None) @@ -114,8 +119,8 @@ RemoteBrokerConnector, RemoteClientConnector, RemoteMonitorConnector, - RemoteManagerConnector + RemoteManagerConnector, ] - return dict( - (connector.component.name, connector) - for connector in all_connectors) + return { + connector.component.name: connector for connector in all_connectors + } diff -Nru landscape-client-23.02/landscape/client/broker/client.py landscape-client-23.08/landscape/client/broker/client.py --- landscape-client-23.02/landscape/client/broker/client.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/client.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,19 +1,23 @@ -from logging import info, exception, error, debug -import sys import random +import sys +from logging import debug +from logging import error +from logging import exception +from logging import info -from twisted.internet.defer import maybeDeferred, succeed +from twisted.internet.defer import maybeDeferred +from twisted.internet.defer import succeed +from landscape.client.amp import remote from landscape.lib.format import format_object from landscape.lib.twisted_util import gather_results -from landscape.client.amp import remote class HandlerNotFoundError(Exception): """A handler for the given message type was not found.""" -class BrokerClientPlugin(object): +class BrokerClientPlugin: """A convenience for writing L{BrokerClient} plugins. This provides a register method which will set up a bunch of @@ -32,6 +36,7 @@ sent. See L{landscape.broker.server.BrokerServer.send_message} for more details. 
""" + run_interval = 5 run_immediately = False scope = None # Global scope @@ -58,8 +63,10 @@ if acceptance: return callable(*args, **kwargs) - self.client.reactor.call_on(("message-type-acceptance-changed", type), - acceptance_changed) + self.client.reactor.call_on( + ("message-type-acceptance-changed", type), + acceptance_changed, + ) def _resynchronize(self, scopes=None): """ @@ -106,18 +113,27 @@ if self.run_immediately: self._run_with_error_log() if self.run_interval is not None: - delay = (random.random() * self.run_interval * - self.client.config.stagger_launch) - debug("delaying start of %s for %d seconds", - format_object(self), delay) + delay = ( + random.random() + * self.run_interval + * self.client.config.stagger_launch + ) + debug( + "delaying start of %s for %d seconds", + format_object(self), + delay, + ) self._loop = self.client.reactor.call_later( - delay, self._start_loop) + delay, + self._start_loop, + ) def _start_loop(self): """Launch the client loop.""" self._loop = self.client.reactor.call_every( self.run_interval, - self._run_with_error_log) + self._run_with_error_log, + ) def _run_with_error_log(self): """Wrap self.run in a Deferred with a logging error handler.""" @@ -134,7 +150,7 @@ return failure -class BrokerClient(object): +class BrokerClient: """Basic plugin registry for clients that have to deal with the broker. This knows about the needs of a client when dealing with the Landscape @@ -148,10 +164,11 @@ @param reactor: A L{LandscapeReactor}. """ + name = "client" def __init__(self, reactor, config): - super(BrokerClient, self).__init__() + super().__init__() self.reactor = reactor self.broker = None self.config = config @@ -179,7 +196,7 @@ """ info("Registering plugin %s.", format_object(plugin)) self._plugins.append(plugin) - if hasattr(plugin, 'plugin_name'): + if hasattr(plugin, "plugin_name"): self._plugin_names[plugin.plugin_name] = plugin plugin.register(self) @@ -210,15 +227,16 @@ @return: The return value of the handler, if found. @raises: HandlerNotFoundError if the handler was not found """ - type = message["type"] - handler = self._registered_messages.get(type) + typ = message["type"] + handler = self._registered_messages.get(typ) if handler is None: - raise HandlerNotFoundError(type) + raise HandlerNotFoundError(typ) try: return handler(message) except Exception: - exception("Error running message handler for type %r: %r" - % (type, handler)) + exception( + f"Error running message handler for type {typ!r}: {handler!r}", + ) @remote def message(self, message): @@ -259,8 +277,9 @@ results = self.reactor.fire((event_type, message_type), acceptance) else: results = self.reactor.fire(event_type, *args, **kwargs) - return gather_results([ - maybeDeferred(lambda x: x, result) for result in results]) + return gather_results( + [maybeDeferred(lambda x: x, result) for result in results], + ) def handle_reconnect(self): """Called when the connection with the broker is established again. @@ -273,8 +292,8 @@ - Re-register ourselves as client, so the broker knows we exist and will talk to us firing events and dispatching messages. 
""" - for type in self._registered_messages: - self.broker.register_client_accepted_message_type(type) + for typ in self._registered_messages: + self.broker.register_client_accepted_message_type(typ) self.broker.register_client(self.name) @remote diff -Nru landscape-client-23.02/landscape/client/broker/config.py landscape-client-23.08/landscape/client/broker/config.py --- landscape-client-23.02/landscape/client/broker/config.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/config.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,4 @@ """Configuration class for the broker.""" - import os from landscape.client.deployment import Configuration @@ -12,7 +11,7 @@ """ def __init__(self): - super(BrokerConfiguration, self).__init__() + super().__init__() self._original_http_proxy = os.environ.get("http_proxy") self._original_https_proxy = os.environ.get("https_proxy") @@ -33,35 +32,67 @@ - C{http_proxy} - C{https_proxy} """ - parser = super(BrokerConfiguration, self).make_parser() + parser = super().make_parser() - parser.add_option("-a", "--account-name", metavar="NAME", - help="The account this computer belongs to.") - parser.add_option("-p", "--registration-key", metavar="KEY", - help="The account-wide key used for " - "registering clients.") - parser.add_option("-t", "--computer-title", metavar="TITLE", - help="The title of this computer") - parser.add_option("--exchange-interval", default=15 * 60, type="int", - metavar="INTERVAL", - help="The number of seconds between server " - "exchanges.") - parser.add_option("--urgent-exchange-interval", default=1 * 60, - type="int", metavar="INTERVAL", - help="The number of seconds between urgent server " - "exchanges.") - parser.add_option("--ping-interval", default=30, type="int", - metavar="INTERVAL", - help="The number of seconds between pings.") - parser.add_option("--http-proxy", metavar="URL", - help="The URL of the HTTP proxy, if one is needed.") - parser.add_option("--https-proxy", metavar="URL", - help="The URL of the HTTPS proxy, if one is needed.") - parser.add_option("--access-group", default="", - help="Suggested access group for this computer.") - parser.add_option("--tags", - help="Comma separated list of tag names to be sent " - "to the server.") + parser.add_option( + "-a", + "--account-name", + metavar="NAME", + help="The account this computer belongs to.", + ) + parser.add_option( + "-p", + "--registration-key", + metavar="KEY", + help="The account-wide key used for " "registering clients.", + ) + parser.add_option( + "-t", + "--computer-title", + metavar="TITLE", + help="The title of this computer", + ) + parser.add_option( + "--exchange-interval", + default=15 * 60, + type="int", + metavar="INTERVAL", + help="The number of seconds between server " "exchanges.", + ) + parser.add_option( + "--urgent-exchange-interval", + default=1 * 60, + type="int", + metavar="INTERVAL", + help="The number of seconds between urgent server " "exchanges.", + ) + parser.add_option( + "--ping-interval", + default=30, + type="int", + metavar="INTERVAL", + help="The number of seconds between pings.", + ) + parser.add_option( + "--http-proxy", + metavar="URL", + help="The URL of the HTTP proxy, if one is needed.", + ) + parser.add_option( + "--https-proxy", + metavar="URL", + help="The URL of the HTTPS proxy, if one is needed.", + ) + parser.add_option( + "--access-group", + default="", + help="Suggested access group for this computer.", + ) + parser.add_option( + "--tags", + help="Comma separated list of tag names 
to be sent " + "to the server.", + ) return parser @@ -78,7 +109,7 @@ C{http_proxy} and C{https_proxy} environment variables based on that config data. """ - super(BrokerConfiguration, self).load(args) + super().load(args) if self.http_proxy: os.environ["http_proxy"] = self.http_proxy elif self._original_http_proxy: diff -Nru landscape-client-23.02/landscape/client/broker/exchange.py landscape-client-23.08/landscape/client/broker/exchange.py --- landscape-client-23.02/landscape/client/broker/exchange.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/exchange.py 2023-08-17 21:19:32.000000000 +0000 @@ -342,23 +342,28 @@ 14. Schedule exchange """ -import time import logging -from landscape.lib.hashlib import md5 +import time -from twisted.internet.defer import Deferred, succeed -from landscape.lib.compat import _PY3 +from twisted.internet.defer import Deferred +from twisted.internet.defer import succeed +from landscape import CLIENT_API +from landscape import DEFAULT_SERVER_API +from landscape import SERVER_API from landscape.lib.backoff import ExponentialBackoff -from landscape.lib.fetch import HTTPCodeError, PyCurlError +from landscape.lib.compat import _PY3 +from landscape.lib.fetch import HTTPCodeError +from landscape.lib.fetch import PyCurlError from landscape.lib.format import format_delta -from landscape.lib.message import got_next_expected, RESYNC -from landscape.lib.versioning import is_version_higher, sort_versions - -from landscape import DEFAULT_SERVER_API, SERVER_API, CLIENT_API +from landscape.lib.hashlib import md5 +from landscape.lib.message import got_next_expected +from landscape.lib.message import RESYNC +from landscape.lib.versioning import is_version_higher +from landscape.lib.versioning import sort_versions -class MessageExchange(object): +class MessageExchange: """Schedule and handle message exchanges with the server. The L{MessageExchange} is the place where messages are sent to go out @@ -376,8 +381,16 @@ # The highest server API that we are capable of speaking _api = SERVER_API - def __init__(self, reactor, store, transport, registration_info, - exchange_store, config, max_messages=100): + def __init__( + self, + reactor, + store, + transport, + registration_info, + exchange_store, + config, + max_messages=100, + ): """ @param reactor: The L{LandscapeReactor} used to fire events in response to messages received by the server. @@ -421,15 +434,16 @@ A message is considered obsolete if the secure ID changed since it was received. 
""" - if 'operation-id' not in message: + if "operation-id" not in message: return False - operation_id = message['operation-id'] + operation_id = message["operation-id"] context = self._exchange_store.get_message_context(operation_id) if context is None: logging.warning( - "No message context for message with operation-id: %s" - % operation_id) + "No message context for message with " + f"operation-id: {operation_id}", + ) return False # Compare the current secure ID with the one that was in effect when @@ -450,7 +464,7 @@ max_bytes = self._max_log_text_bytes if len(value) > max_bytes: value = value[:max_bytes] - value += '...MESSAGE TRUNCATED DUE TO SIZE' + value += "...MESSAGE TRUNCATED DUE TO SIZE" message[field] = value def send(self, message, urgent=False): @@ -463,14 +477,15 @@ """ if self._message_is_obsolete(message): logging.info( - "Response message with operation-id %s was discarded " - "because the client's secure ID has changed in the meantime" - % message.get('operation-id')) + "Response message with operation-id " + f"{message.get('operation-id')} was discarded " + "because the client's secure ID has changed in the meantime", + ) return None # These fields sometimes have really long output we need to trim - self.truncate_message_field('err', message) - self.truncate_message_field('result-text', message) + self.truncate_message_field("err", message) + self.truncate_message_field("result-text", message) if "timestamp" not in message: message["timestamp"] = int(self._reactor.time()) @@ -535,12 +550,16 @@ def _handle_set_intervals(self, message): if "exchange" in message: self._config.exchange_interval = message["exchange"] - logging.info("Exchange interval set to %d seconds." % - self._config.exchange_interval) + logging.info( + "Exchange interval set " + f"to {self._config.exchange_interval:d} seconds.", + ) if "urgent-exchange" in message: self._config.urgent_exchange_interval = message["urgent-exchange"] - logging.info("Urgent exchange interval set to %d seconds." % - self._config.urgent_exchange_interval) + logging.info( + "Urgent exchange interval set " + f"to {self._config.urgent_exchange_interval:d} seconds.", + ) self._config.write() def exchange(self): @@ -565,19 +584,24 @@ start_time = time.time() if self._urgent_exchange: - logging.info("Starting urgent message exchange with %s." - % self._transport.get_url()) + logging.info( + "Starting urgent message exchange " + f"with {self._transport.get_url()}.", + ) else: - logging.info("Starting message exchange with %s." - % self._transport.get_url()) + logging.info( + f"Starting message exchange with {self._transport.get_url()}.", + ) deferred = Deferred() def exchange_completed(): self.schedule_exchange(force=True) self._reactor.fire("exchange-done") - logging.info("Message exchange completed in %s.", - format_delta(time.time() - start_time)) + logging.info( + "Message exchange completed in %s.", + format_delta(time.time() - start_time), + ) deferred.callback(None) def handle_result(result): @@ -627,7 +651,7 @@ # The error returned is an SSL error, most likely the server # is using a self-signed certificate. Let's fire a special # event so that the GUI can display a nice message. 
- logging.error("Message exchange failed: %s" % error.message) + logging.error(f"Message exchange failed: {error.message}") ssl_error = True self._reactor.fire("exchange-failed", ssl_error=ssl_error) @@ -636,16 +660,19 @@ logging.info("Message exchange failed.") exchange_completed() - self._reactor.call_in_thread(handle_result, handle_failure, - self._transport.exchange, payload, - self._registration_info.secure_id, - self._get_exchange_token(), - payload.get("server-api")) + self._reactor.call_in_thread( + handle_result, + handle_failure, + self._transport.exchange, + payload, + self._registration_info.secure_id, + self._get_exchange_token(), + payload.get("server-api"), + ) return deferred def is_urgent(self): - """Return a bool showing whether there is an urgent exchange scheduled. - """ + """Return bool showing whether there is an urgent exchange scheduled""" return self._urgent_exchange def schedule_exchange(self, urgent=False, force=False): @@ -666,9 +693,12 @@ # The 'not self._exchanging' check below is currently untested. # It's a bit tricky to test as it is preventing rehooking 'exchange' # while there's a background thread doing the exchange itself. - if (not self._exchanging and - (force or self._exchange_id is None or - urgent and not self._urgent_exchange)): + if not self._exchanging and ( + force + or self._exchange_id is None + or urgent + and not self._urgent_exchange + ): if urgent: self._urgent_exchange = True if self._exchange_id: @@ -680,18 +710,24 @@ interval = self._config.exchange_interval backoff_delay = self._backoff_counter.get_random_delay() if backoff_delay: - logging.warning("Server is busy. Backing off client for {} " - "seconds".format(backoff_delay)) + logging.warning( + "Server is busy. Backing off client for {} " + "seconds".format(backoff_delay), + ) interval += backoff_delay if self._notification_id is not None: self._reactor.cancel_call(self._notification_id) notification_interval = interval - 10 self._notification_id = self._reactor.call_later( - notification_interval, self._notify_impending_exchange) + notification_interval, + self._notify_impending_exchange, + ) self._exchange_id = self._reactor.call_later( - interval, self.exchange) + interval, + self.exchange, + ) def _get_exchange_token(self): """Get the token given us by the server at the last exchange. 
@@ -743,13 +779,15 @@ del messages[i:] else: server_api = store.get_server_api() - payload = {"server-api": server_api, - "client-api": CLIENT_API, - "sequence": store.get_sequence(), - "accepted-types": accepted_types_digest, - "messages": messages, - "total-messages": total_messages, - "next-expected-sequence": store.get_server_sequence()} + payload = { + "server-api": server_api, + "client-api": CLIENT_API, + "sequence": store.get_sequence(), + "accepted-types": accepted_types_digest, + "messages": messages, + "total-messages": total_messages, + "next-expected-sequence": store.get_server_sequence(), + } accepted_client_types = self.get_client_accepted_message_types() accepted_client_types_hash = self._hash_types(accepted_client_types) if accepted_client_types_hash != self._client_accepted_types_hash: @@ -776,7 +814,8 @@ """ message_store = self._message_store self._client_accepted_types_hash = result.get( - "client-accepted-types-hash") + "client-accepted-types-hash", + ) next_expected = result.get("next-expected-sequence") old_sequence = message_store.get_sequence() if next_expected is None: @@ -793,8 +832,10 @@ # let's fire an event to tell all the plugins that they # ought to generate new messages so the server gets some # up-to-date data. - logging.info("Server asked for ancient data: resynchronizing all " - "state with the server.") + logging.info( + "Server asked for ancient data: resynchronizing all " + "state with the server.", + ) self.send({"type": "resynchronize"}) self._reactor.fire("resynchronize-clients") @@ -808,8 +849,9 @@ if new_uuid and isinstance(new_uuid, bytes): new_uuid = new_uuid.decode("ascii") if new_uuid != old_uuid: - logging.info("Server UUID changed (old=%s, new=%s)." - % (old_uuid, new_uuid)) + logging.info( + f"Server UUID changed (old={old_uuid}, new={new_uuid}).", + ) self._reactor.fire("server-uuid-changed", old_uuid, new_uuid) message_store.set_server_uuid(new_uuid) @@ -874,12 +916,14 @@ Any message handlers registered with L{register_message} will be called. """ - if 'operation-id' in message: + if "operation-id" in message: # This is a message that requires a response. Store the secure ID # so we can check for obsolete results later. self._exchange_store.add_message_context( - message['operation-id'], self._registration_info.secure_id, - message['type']) + message["operation-id"], + self._registration_info.secure_id, + message["type"], + ) self._reactor.fire("message", message) # This has plan interference! but whatever. @@ -903,9 +947,9 @@ stable_types = old_types & new_types removed_types = old_types - new_types diff = [] - diff.extend(["+%s" % type for type in added_types]) - diff.extend(["%s" % type for type in stable_types]) - diff.extend(["-%s" % type for type in removed_types]) + diff.extend([f"+{typ}" for typ in added_types]) + diff.extend([f"{typ}" for typ in stable_types]) + diff.extend([f"-{typ}" for typ in removed_types]) return " ".join(diff) diff -Nru landscape-client-23.02/landscape/client/broker/exchangestore.py landscape-client-23.08/landscape/client/broker/exchangestore.py --- landscape-client-23.02/landscape/client/broker/exchangestore.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/exchangestore.py 2023-08-17 21:19:32.000000000 +0000 @@ -9,7 +9,7 @@ from landscape.lib.store import with_cursor -class MessageContext(object): +class MessageContext: """Stores a context for incoming messages that require a response. 
The context consists of @@ -39,10 +39,11 @@ def remove(self, cursor): cursor.execute( "DELETE FROM message_context WHERE operation_id=?", - (self.operation_id,)) + (self.operation_id,), + ) -class ExchangeStore(object): +class ExchangeStore: """Message meta data required by the L{MessageExchange}. The implementation uses a SQLite database as backend, with a single table @@ -51,6 +52,7 @@ @param filename: The name of the file that contains the sqlite database. """ + _db = None def __init__(self, filename): @@ -61,13 +63,20 @@ @with_cursor def add_message_context( - self, cursor, operation_id, secure_id, message_type): + self, + cursor, + operation_id, + secure_id, + message_type, + ): """Add a L{MessageContext} with the given data.""" params = (operation_id, secure_id, message_type, time.time()) cursor.execute( "INSERT INTO message_context " " (operation_id, secure_id, message_type, timestamp) " - " VALUES (?,?,?,?)", params) + " VALUES (?,?,?,?)", + params, + ) return MessageContext(self._db, *params) @with_cursor @@ -75,7 +84,9 @@ """The L{MessageContext} for the given C{operation_id} or C{None}.""" cursor.execute( "SELECT operation_id, secure_id, message_type, timestamp " - "FROM message_context WHERE operation_id=?", (operation_id,)) + "FROM message_context WHERE operation_id=?", + (operation_id,), + ) row = cursor.fetchone() if row: return MessageContext(self._db, *row) @@ -101,10 +112,12 @@ "CREATE TABLE message_context" " (id INTEGER PRIMARY KEY, timestamp TIMESTAMP, " " secure_id TEXT NOT NULL, operation_id INTEGER NOT NULL, " - " message_type text NOT NULL)") + " message_type text NOT NULL)", + ) cursor.execute( "CREATE UNIQUE INDEX msgctx_operationid_idx ON " - "message_context(operation_id)") + "message_context(operation_id)", + ) except (sqlite3.OperationalError, sqlite3.DatabaseError): cursor.close() db.rollback() diff -Nru landscape-client-23.02/landscape/client/broker/ping.py landscape-client-23.08/landscape/client/broker/ping.py --- landscape-client-23.02/landscape/client/broker/ping.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/ping.py 2023-08-17 21:19:32.000000000 +0000 @@ -48,7 +48,7 @@ from landscape.lib.log import log_failure -class PingClient(object): +class PingClient: """An HTTP client which knows how to talk to the ping server.""" def __init__(self, reactor, get_page=None): @@ -74,10 +74,16 @@ def errback(type, value, tb): page_deferred.errback(Failure(value, type, tb)) - self._reactor.call_in_thread(page_deferred.callback, errback, - self.get_page, url, - post=True, data=data, - headers=headers) + + self._reactor.call_in_thread( + page_deferred.callback, + errback, + self.get_page, + url, + post=True, + data=data, + headers=headers, + ) page_deferred.addCallback(self._got_result) return page_deferred return defer.succeed(False) @@ -92,7 +98,7 @@ return True -class Pinger(object): +class Pinger: """ A plugin which pings the Landscape server with HTTP requests to see if a full exchange should be initiated. @@ -107,8 +113,14 @@ scheduled ping. 
""" - def __init__(self, reactor, identity, exchanger, config, - ping_client_factory=PingClient): + def __init__( + self, + reactor, + identity, + exchanger, + config, + ping_client_factory=PingClient, + ): self._config = config self._identity = identity self._reactor = reactor @@ -132,33 +144,42 @@ def ping(self): """Perform a ping; if there are messages, fire an exchange.""" deferred = self._ping_client.ping( - self._config.ping_url, self._identity.insecure_id) + self._config.ping_url, + self._identity.insecure_id, + ) deferred.addCallback(self._got_result) deferred.addErrback(self._got_error) deferred.addBoth(lambda _: self._schedule()) def _got_result(self, exchange): if exchange: - info("Ping indicates message available. " - "Scheduling an urgent exchange.") + info( + "Ping indicates message available. " + "Scheduling an urgent exchange.", + ) self._exchanger.schedule_exchange(urgent=True) def _got_error(self, failure): - log_failure(failure, - "Error contacting ping server at %s" % - (self._ping_client.url,)) + log_failure( + failure, + f"Error contacting ping server at {self._ping_client.url}", + ) def _schedule(self): """Schedule a new ping using the current ping interval.""" - self._call_id = self._reactor.call_later(self._config.ping_interval, - self.ping) + self._call_id = self._reactor.call_later( + self._config.ping_interval, + self.ping, + ) def _handle_set_intervals(self, message): if message["type"] == "set-intervals" and "ping" in message: self._config.ping_interval = message["ping"] self._config.write() - info("Ping interval set to %d seconds." % - self._config.ping_interval) + info( + f"Ping interval set to {self._config.ping_interval:d} " + "seconds.", + ) if self._call_id is not None: self._reactor.cancel_call(self._call_id) self._schedule() @@ -170,8 +191,7 @@ self._call_id = None -class FakePinger(object): - +class FakePinger: def __init__(self, *args, **kwargs): pass diff -Nru landscape-client-23.02/landscape/client/broker/registration.py landscape-client-23.08/landscape/client/broker/registration.py --- landscape-client-23.02/landscape/client/broker/registration.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/registration.py 2023-08-17 21:19:32.000000000 +0000 @@ -16,10 +16,11 @@ from landscape.client.broker.exchange import maybe_bytes from landscape.client.monitor.ubuntuproinfo import get_ubuntu_pro_info from landscape.lib.juju import get_juju_info -from landscape.lib.tag import is_valid_tag_list from landscape.lib.network import get_fqdn -from landscape.lib.vm_info import get_vm_info, get_container_info +from landscape.lib.tag import is_valid_tag_list from landscape.lib.versioning import is_version_higher +from landscape.lib.vm_info import get_container_info +from landscape.lib.vm_info import get_vm_info class RegistrationError(Exception): @@ -31,7 +32,6 @@ def persist_property(name): - def get(self): value = self._persist.get(name) try: @@ -46,14 +46,13 @@ def config_property(name): - def get(self): return getattr(self._config, name) return property(get) -class Identity(object): +class Identity: """Maintains details about the identity of this Landscape client. @ivar secure_id: A server-provided ID for secure message exchange. @@ -83,7 +82,7 @@ self._persist = persist.root_at("registration") -class RegistrationHandler(object): +class RegistrationHandler: """ An object from which registration can be requested of the server, and which will handle forced ID changes from the server. 
@@ -91,8 +90,16 @@ L{register} should be used to perform initial registration. """ - def __init__(self, config, identity, reactor, exchange, pinger, - message_store, fetch_async=None): + def __init__( + self, + config, + identity, + reactor, + exchange, + pinger, + message_store, + fetch_async=None, + ): self._config = config self._identity = identity self._reactor = reactor @@ -104,8 +111,10 @@ self._reactor.call_on("exchange-done", self._handle_exchange_done) self._exchange.register_message("set-id", self._handle_set_id) self._exchange.register_message("unknown-id", self._handle_unknown_id) - self._exchange.register_message("registration", - self._handle_registration) + self._exchange.register_message( + "registration", + self._handle_registration, + ) self._should_register = None self._fetch_async = fetch_async self._juju_data = None @@ -116,9 +125,11 @@ if id.secure_id: return False - return bool(id.computer_title and - id.account_name and - self._message_store.accepts("register")) + return bool( + id.computer_title + and id.account_name + and self._message_store.accepts("register"), + ) def register(self): """ @@ -186,14 +197,16 @@ tags = None logging.error("Invalid tags provided for registration.") - message = {"type": "register", - "hostname": get_fqdn(), - "account_name": account_name, - "computer_title": identity.computer_title, - "registration_password": identity.registration_key, - "tags": tags, - "container-info": get_container_info(), - "vm-info": get_vm_info()} + message = { + "type": "register", + "hostname": get_fqdn(), + "account_name": account_name, + "computer_title": identity.computer_title, + "registration_password": identity.registration_key, + "tags": tags, + "container-info": get_container_info(), + "vm-info": get_vm_info(), + } if self._clone_secure_id: # We use the secure id here because the registration is encrypted @@ -216,19 +229,20 @@ message["juju-info"] = { "environment-uuid": self._juju_data["environment-uuid"], "api-addresses": self._juju_data["api-addresses"], - "machine-id": self._juju_data["machine-id"]} + "machine-id": self._juju_data["machine-id"], + } # The computer is a normal computer, possibly a container. with_word = "with" if bool(registration_key) else "without" - with_tags = "and tags %s " % tags if tags else "" - with_group = "in access group '%s' " % group if group else "" + with_tags = f"and tags {tags} " if tags else "" + with_group = f"in access group '{group}' " if group else "" message["ubuntu_pro_info"] = json.dumps(get_ubuntu_pro_info()) logging.info( - u"Queueing message to register with account %r %s%s" - "%s a password." % ( - account_name, with_group, with_tags, with_word)) + f"Queueing message to register with account {account_name!r} " + f"{with_group}{with_tags}{with_word} a password.", + ) self._exchange.send(message) def _handle_set_id(self, message): @@ -239,15 +253,18 @@ Fire C{"registration-done"} and C{"resynchronize-clients"}. 
""" - id = self._identity - if id.secure_id: - logging.info("Overwriting secure_id with '%s'" % id.secure_id) + cid = self._identity + if cid.secure_id: + logging.info(f"Overwriting secure_id with '{cid.secure_id}'") - id.secure_id = message.get("id") - id.insecure_id = message.get("insecure-id") - logging.info("Using new secure-id ending with %s for account %s.", - id.secure_id[-10:], id.account_name) - logging.debug("Using new secure-id: %s", id.secure_id) + cid.secure_id = message.get("id") + cid.insecure_id = message.get("insecure-id") + logging.info( + "Using new secure-id ending with %s for account %s.", + cid.secure_id[-10:], + cid.account_name, + ) + logging.debug("Using new secure-id: %s", cid.secure_id) self._reactor.fire("registration-done") self._reactor.fire("resynchronize-clients") @@ -257,19 +274,21 @@ self._reactor.fire("registration-failed", reason=message_info) def _handle_unknown_id(self, message): - id = self._identity + cid = self._identity clone = message.get("clone-of") if clone is None: - logging.info("Client has unknown secure-id for account %s." - % id.account_name) + logging.info( + "Client has unknown secure-id for account " + f"{cid.account_name}.", + ) else: # Save the secure id as the clone, and clear it so it's renewed - logging.info("Client is clone of computer %s" % clone) - self._clone_secure_id = id.secure_id - id.secure_id = None - id.insecure_id = None + logging.info(f"Client is clone of computer {clone}") + self._clone_secure_id = cid.secure_id + cid.secure_id = None + cid.insecure_id = None -class RegistrationResponse(object): +class RegistrationResponse: """A helper for dealing with the response of a single registration request. @ivar deferred: The L{Deferred} that will be fired as per diff -Nru landscape-client-23.02/landscape/client/broker/server.py landscape-client-23.08/landscape/client/broker/server.py --- landscape-client-23.02/landscape/client/broker/server.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/server.py 2023-08-17 21:19:32.000000000 +0000 @@ -42,15 +42,14 @@ : exchange """ - import logging from twisted.internet.defer import Deferred -from landscape.lib.compat import _PY3 -from landscape.lib.twisted_util import gather_results from landscape.client.amp import remote from landscape.client.manager.manager import FAILED +from landscape.lib.compat import _PY3 +from landscape.lib.twisted_util import gather_results def event(method): @@ -71,7 +70,7 @@ return broadcast_event -class BrokerServer(object): +class BrokerServer: """ A broker server capable of handling messages from plugins connected using the L{BrokerProtocol}. @@ -82,11 +81,20 @@ @param registration: The {RegistrationHandler}. @param message_store: The broker's L{MessageStore}. 
""" + name = "broker" - def __init__(self, config, reactor, exchange, registration, - message_store, pinger): + def __init__( + self, + config, + reactor, + exchange, + registration, + message_store, + pinger, + ): from landscape.client.broker.amp import get_component_registry + self.connectors_registry = get_component_registry() self._config = config self._reactor = reactor @@ -99,8 +107,10 @@ reactor.call_on("message", self.broadcast_message) reactor.call_on("impending-exchange", self.impending_exchange) - reactor.call_on("message-type-acceptance-changed", - self.message_type_acceptance_changed) + reactor.call_on( + "message-type-acceptance-changed", + self.message_type_acceptance_changed, + ) reactor.call_on("server-uuid-changed", self.server_uuid_changed) reactor.call_on("package-data-changed", self.package_data_changed) reactor.call_on("resynchronize-clients", self.resynchronize) @@ -201,7 +211,9 @@ message = {k.decode("ascii"): v for k, v in message.items()} message["type"] = message["type"].decode("ascii") if isinstance(session_id, bool) and message["type"] in ( - "operation-result", "change-packages-result"): + "operation-result", + "change-packages-result", + ): # XXX This means we're performing a Landscape-driven upgrade and # we're being invoked by a package-changer or release-upgrader # process that is running code which doesn't know about the @@ -218,7 +230,8 @@ if session_id is None: raise RuntimeError( - "Session ID must be set before attempting to send a message") + "Session ID must be set before attempting to send a message", + ) if self._message_store.is_valid_session_id(session_id): return self._exchanger.send(message, urgent=urgent) @@ -320,7 +333,6 @@ calls = [] def get_handler(event_type): - def handler(**kwargs): for call in calls: self._reactor.cancel_call(call) @@ -367,26 +379,30 @@ indicating as such. """ opid = message.get("operation-id") - if (True not in results and - opid is not None and - message["type"] != "resynchronize" - ): + if ( + True not in results + and opid is not None + and message["type"] != "resynchronize" + ): mtype = message["type"] - logging.error("Nobody handled the %s message." % (mtype,)) + logging.error(f"Nobody handled the {mtype} message.") result_text = """\ -Landscape client failed to handle this request (%s) because the +Landscape client failed to handle this request ({}) because the plugin which should handle it isn't available. This could mean that the plugin has been intentionally disabled, or that the client isn't running properly, or you may be running an older version of the client that doesn't support this feature. 
-""" % (mtype,) +""".format( + mtype, + ) response = { "type": "operation-result", "status": FAILED, "result-text": result_text, - "operation-id": opid} + "operation-id": opid, + } self._exchanger.send(response, urgent=True) @remote diff -Nru landscape-client-23.02/landscape/client/broker/service.py landscape-client-23.08/landscape/client/broker/service.py --- landscape-client-23.02/landscape/client/broker/service.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/service.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,17 +1,19 @@ """Deployment code for the monitor.""" - import os -from landscape.client.service import LandscapeService, run_landscape_service from landscape.client.amp import ComponentPublisher -from landscape.client.broker.registration import RegistrationHandler, Identity from landscape.client.broker.config import BrokerConfiguration -from landscape.client.broker.transport import HTTPTransport from landscape.client.broker.exchange import MessageExchange from landscape.client.broker.exchangestore import ExchangeStore from landscape.client.broker.ping import Pinger -from landscape.client.broker.store import get_default_message_store +from landscape.client.broker.registration import Identity +from landscape.client.broker.registration import RegistrationHandler from landscape.client.broker.server import BrokerServer +from landscape.client.broker.store import get_default_message_store +from landscape.client.broker.transport import HTTPTransport +from landscape.client.service import LandscapeService +from landscape.client.service import run_landscape_service +from landscape.client.watchdog import bootstrap_list class BrokerService(LandscapeService): @@ -44,48 +46,82 @@ service_name = BrokerServer.name def __init__(self, config): + self._config = config self.persist_filename = os.path.join( - config.data_path, "%s.bpickle" % (self.service_name,)) - super(BrokerService, self).__init__(config) + config.data_path, + f"{self.service_name}.bpickle", + ) + super().__init__(config) self.transport = self.transport_factory( - self.reactor, config.url, config.ssl_public_key) + self.reactor, + config.url, + config.ssl_public_key, + ) self.message_store = get_default_message_store( - self.persist, config.message_store_path) + self.persist, + config.message_store_path, + ) self.identity = Identity(self.config, self.persist) exchange_store = ExchangeStore(self.config.exchange_store_path) self.exchanger = MessageExchange( - self.reactor, self.message_store, self.transport, self.identity, - exchange_store, config) + self.reactor, + self.message_store, + self.transport, + self.identity, + exchange_store, + config, + ) self.pinger = self.pinger_factory( - self.reactor, self.identity, self.exchanger, config) + self.reactor, + self.identity, + self.exchanger, + config, + ) self.registration = RegistrationHandler( - config, self.identity, self.reactor, self.exchanger, self.pinger, - self.message_store) - self.broker = BrokerServer(self.config, self.reactor, self.exchanger, - self.registration, self.message_store, - self.pinger) - self.publisher = ComponentPublisher(self.broker, self.reactor, - self.config) + config, + self.identity, + self.reactor, + self.exchanger, + self.pinger, + self.message_store, + ) + self.broker = BrokerServer( + self.config, + self.reactor, + self.exchanger, + self.registration, + self.message_store, + self.pinger, + ) + self.publisher = ComponentPublisher( + self.broker, + self.reactor, + self.config, + ) - def startService(self): + def 
startService(self): # noqa: N802 """Start the broker. Create a L{BrokerServer} listening on C{broker_socket_path} for clients connecting with the L{BrokerServerConnector}, and start the L{MessageExchange} and L{Pinger} services. """ - super(BrokerService, self).startService() + super().startService() + bootstrap_list.bootstrap( + data_path=self._config.data_path, + log_dir=self._config.log_dir, + ) self.publisher.start() self.exchanger.start() self.pinger.start() - def stopService(self): + def stopService(self): # noqa: N802 """Stop the broker.""" deferred = self.publisher.stop() self.exchanger.stop() self.pinger.stop() - super(BrokerService, self).stopService() + super().stopService() return deferred diff -Nru landscape-client-23.02/landscape/client/broker/store.py landscape-client-23.08/landscape/client/broker/store.py --- landscape-client-23.02/landscape/client/broker/store.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/store.py 2023-08-17 21:19:32.000000000 +0000 @@ -91,25 +91,28 @@ system and L{landscape.lib.message.got_next_expected} to check how the strategy for updating the pending offset and the sequence is implemented. """ - import itertools import logging import os +import shutil +import traceback import uuid from twisted.python.compat import iteritems from landscape import DEFAULT_SERVER_API from landscape.lib import bpickle -from landscape.lib.fs import create_binary_file, read_binary_file -from landscape.lib.versioning import sort_versions, is_version_higher +from landscape.lib.fs import create_binary_file +from landscape.lib.fs import read_binary_file +from landscape.lib.versioning import is_version_higher +from landscape.lib.versioning import sort_versions HELD = "h" BROKEN = "b" -class MessageStore(object): +class MessageStore: """A message store which stores its messages in a file system hierarchy. Beside the "sequence" and the "pending offset" values described in the @@ -134,9 +137,12 @@ # in case the server supports it. _api = DEFAULT_SERVER_API - def __init__(self, persist, directory, directory_size=1000): + def __init__(self, persist, directory, directory_size=1000, max_dirs=4, + max_size_mb=400): self._directory = directory self._directory_size = directory_size + self._max_dirs = max_dirs # Maximum number of directories in store + self._max_size_mb = max_size_mb # Maximum size of message store self._schemas = {} self._original_persist = persist self._persist = persist.root_at("message-store") @@ -273,7 +279,7 @@ logging.exception(e) self._add_flags(filename, BROKEN) else: - if u"type" not in message: + if "type" not in message: # Special case to decode keys for messages which were # serialized by py27 prior to py3 upgrade, and having # implicit byte message keys. Message may still get @@ -281,8 +287,9 @@ # broker. 
(lp: #1718689) message = { (k if isinstance(k, str) else k.decode("ascii")): v - for k, v in message.items()} - message[u"type"] = message[u"type"].decode("ascii") + for k, v in message.items() + } + message["type"] = message["type"].decode("ascii") unknown_type = message["type"] not in accepted_types unknown_api = not is_version_higher(server_api, message["api"]) @@ -292,10 +299,53 @@ messages.append(message) return messages + def get_messages_total_size(self): + """Get total size of messages directory""" + sizes = [] + for dirname in os.listdir(self._directory): + dirpath = os.path.join(self._directory, dirname) + dirsize = sum(file.stat().st_size for file in os.scandir(dirpath)) + sizes.append(dirsize) + return sum(sizes) + + def delete_messages_over_limit(self): + """ + Delete message dirs if there are any over the max, which happens if + messages are queued up but not able to be sent + """ + + cur_dirs = os.listdir(self._directory) + cur_dirs.sort(key=int) # Since you could have 0, .., 9, 10 + num_dirs = len(cur_dirs) + + num_dirs_to_delete = max(0, num_dirs - self._max_dirs) # No negatives + dirs_to_delete = cur_dirs[:num_dirs_to_delete] # Chop off beginning + + for dirname in dirs_to_delete: + dirpath = os.path.join(self._directory, dirname) + try: + logging.debug(f"Trimming message store: {dirpath}") + shutil.rmtree(dirpath) + except Exception: # We want to continue as normal on any error + logging.warning(traceback.format_exc()) + logging.warning("Unable to delete message directory!") + logging.warning(dirpath) + + # Something is wrong if after deleting a bunch of files, we are still + # using too much space. Rather than look around for big files, we just + # start over. + num_bytes = self.get_messages_total_size() + num_mb = num_bytes / 1e6 + if num_mb > self._max_size_mb: + logging.warning("Messages too large! Clearing all messages!") + self.delete_all_messages() + def delete_old_messages(self): """Delete messages which are unlikely to be needed in the future.""" - for fn in itertools.islice(self._walk_messages(exclude=HELD + BROKEN), - self.get_pending_offset()): + for fn in itertools.islice( + self._walk_messages(exclude=HELD + BROKEN), + self.get_pending_offset(), + ): os.unlink(fn) containing_dir = os.path.split(fn)[0] if not os.listdir(containing_dir): @@ -326,9 +376,9 @@ pending_offset = self.get_pending_offset() for filename in self._walk_messages(exclude=BROKEN): flags = self._get_flags(filename) - if ((HELD in flags or i >= pending_offset) and - os.stat(filename).st_ino == message_id - ): + if (HELD in flags or i >= pending_offset) and os.stat( + filename, + ).st_ino == message_id: return True if BROKEN not in flags and HELD not in flags: i += 1 @@ -347,7 +397,8 @@ if not self._persist.has("first-failure-time"): self._persist.set("first-failure-time", timestamp) continued_failure_time = timestamp - self._persist.get( - "first-failure-time") + "first-failure-time", + ) if self._persist.get("blackhole-messages"): # Already added the resync message return @@ -357,7 +408,8 @@ self._persist.set("blackhole-messages", True) logging.warning( "Unable to successfully communicate with Landscape server " - "for more than a week. Waiting for resync.") + "for more than a week. Waiting for resync.", + ) def add(self, message): """Queue a message for delivery.
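The two methods added above cap the on-disk message store in two stages: delete_messages_over_limit() first trims whole message directories beyond max_dirs, oldest first, and only if the store still exceeds max_size_mb does it fall back to clearing all messages and waiting for a resync. A minimal standalone sketch of the first stage follows; the directory names and the MAX_DIRS limit are illustrative, and only the numeric sort plus the front-of-list slice mirror the code above:

import os
import shutil
import tempfile

MAX_DIRS = 4  # stands in for MessageStore's new max_dirs default

store = tempfile.mkdtemp()
for i in range(10):  # simulate queued message dirs "0" .. "9"
    os.mkdir(os.path.join(store, str(i)))

# Sort numerically: a lexicographic sort would place "10" before "2"
# and trim the wrong directories once the sequence passes 9.
cur_dirs = sorted(os.listdir(store), key=int)
for name in cur_dirs[: max(0, len(cur_dirs) - MAX_DIRS)]:
    shutil.rmtree(os.path.join(store, name))

print(sorted(os.listdir(store), key=int))  # ['6', '7', '8', '9']
shutil.rmtree(store)

Trimming from the front of the numerically sorted list is what makes the oldest queued messages go first; the size fallback exists because the few directories kept can still hold arbitrarily large payloads.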
@@ -373,6 +425,8 @@ logging.debug("Dropped message, awaiting resync.") return + self.delete_messages_over_limit() + server_api = self.get_server_api() if "api" not in message: @@ -432,7 +486,8 @@ """Walk the files which are definitely pending.""" pending_offset = self.get_pending_offset() for i, filename in enumerate( - self._walk_messages(exclude=HELD + BROKEN)): + self._walk_messages(exclude=HELD + BROKEN), + ): if i >= pending_offset: yield filename @@ -443,12 +498,15 @@ for message_dir in message_dirs: for filename in self._get_sorted_filenames(message_dir): flags = set(self._get_flags(filename)) - if (not exclude or not exclude & flags): + if not exclude or not exclude & flags: yield self._message_dir(message_dir, filename) def _get_sorted_filenames(self, dir=""): - message_files = [x for x in os.listdir(self._message_dir(dir)) - if not x.endswith(".tmp")] + message_files = [ + x + for x in os.listdir(self._message_dir(dir)) + if not x.endswith(".tmp") + ] message_files.sort(key=lambda x: int(x.split("_")[0])) return message_files @@ -549,6 +607,7 @@ Get a L{MessageStore} object with all Landscape message schemas added. """ from landscape.message_schemas.server_bound import message_schemas + store = MessageStore(*args, **kwargs) for schema in message_schemas: store.add_schema(schema) diff -Nru landscape-client-23.02/landscape/client/broker/tests/helpers.py landscape-client-23.08/landscape/client/broker/tests/helpers.py --- landscape-client-23.02/landscape/client/broker/tests/helpers.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/tests/helpers.py 2023-08-17 21:19:32.000000000 +0000 @@ -7,23 +7,24 @@ """ import os -from landscape.lib.persist import Persist -from landscape.lib.testing import FakeReactor -from landscape.client.watchdog import bootstrap_list from landscape.client.amp import ComponentPublisher -from landscape.client.broker.transport import FakeTransport +from landscape.client.broker.amp import RemoteBrokerConnector +from landscape.client.broker.client import BrokerClient +from landscape.client.broker.config import BrokerConfiguration from landscape.client.broker.exchange import MessageExchange from landscape.client.broker.exchangestore import ExchangeStore -from landscape.client.broker.store import get_default_message_store -from landscape.client.broker.registration import Identity, RegistrationHandler from landscape.client.broker.ping import Pinger -from landscape.client.broker.config import BrokerConfiguration +from landscape.client.broker.registration import Identity +from landscape.client.broker.registration import RegistrationHandler from landscape.client.broker.server import BrokerServer -from landscape.client.broker.amp import RemoteBrokerConnector -from landscape.client.broker.client import BrokerClient +from landscape.client.broker.store import get_default_message_store +from landscape.client.broker.transport import FakeTransport +from landscape.client.watchdog import bootstrap_list +from landscape.lib.persist import Persist +from landscape.lib.testing import FakeReactor -class BrokerConfigurationHelper(object): +class BrokerConfigurationHelper: """Setup a L{BrokerConfiguration} instance with some test config values. 
The following attributes will be set on your test case: @@ -37,8 +38,10 @@ def set_up(self, test_case): data_path = test_case.makeDir() log_dir = test_case.makeDir() - test_case.config_filename = os.path.join(test_case.makeDir(), - "client.conf") + test_case.config_filename = os.path.join( + test_case.makeDir(), + "client.conf", + ) with open(test_case.config_filename, "w") as fh: fh.write( @@ -47,8 +50,9 @@ "computer_title = Some Computer\n" "account_name = some_account\n" "ping_url = http://localhost:91910\n" - "data_path = %s\n" - "log_dir = %s\n" % (data_path, log_dir)) + f"data_path = {data_path}\n" + f"log_dir = {log_dir}\n", + ) bootstrap_list.bootstrap(data_path=data_path, log_dir=log_dir) @@ -87,20 +91,31 @@ """ def set_up(self, test_case): - super(ExchangeHelper, self).set_up(test_case) + super().set_up(test_case) test_case.persist_filename = test_case.makePersistFile() test_case.persist = Persist(filename=test_case.persist_filename) test_case.mstore = get_default_message_store( - test_case.persist, test_case.config.message_store_path) + test_case.persist, + test_case.config.message_store_path, + ) test_case.identity = Identity(test_case.config, test_case.persist) - test_case.transport = FakeTransport(None, test_case.config.url, - test_case.config.ssl_public_key) + test_case.transport = FakeTransport( + None, + test_case.config.url, + test_case.config.ssl_public_key, + ) test_case.reactor = FakeReactor() test_case.exchange_store = ExchangeStore( - test_case.config.exchange_store_path) + test_case.config.exchange_store_path, + ) test_case.exchanger = MessageExchange( - test_case.reactor, test_case.mstore, test_case.transport, - test_case.identity, test_case.exchange_store, test_case.config) + test_case.reactor, + test_case.mstore, + test_case.transport, + test_case.identity, + test_case.exchange_store, + test_case.config, + ) class RegistrationHelper(ExchangeHelper): @@ -116,16 +131,27 @@ """ def set_up(self, test_case): - super(RegistrationHelper, self).set_up(test_case) - test_case.pinger = Pinger(test_case.reactor, test_case.identity, - test_case.exchanger, test_case.config) + super().set_up(test_case) + test_case.pinger = Pinger( + test_case.reactor, + test_case.identity, + test_case.exchanger, + test_case.config, + ) test_case.config.cloud = getattr(test_case, "cloud", False) if hasattr(test_case, "juju_contents"): test_case.makeFile( - test_case.juju_contents, path=test_case.config.juju_filename) + test_case.juju_contents, + path=test_case.config.juju_filename, + ) test_case.handler = RegistrationHandler( - test_case.config, test_case.identity, test_case.reactor, - test_case.exchanger, test_case.pinger, test_case.mstore) + test_case.config, + test_case.identity, + test_case.reactor, + test_case.exchanger, + test_case.pinger, + test_case.mstore, + ) class BrokerServerHelper(RegistrationHelper): @@ -139,10 +165,15 @@ """ def set_up(self, test_case): - super(BrokerServerHelper, self).set_up(test_case) - test_case.broker = BrokerServer(test_case.config, test_case.reactor, - test_case.exchanger, test_case.handler, - test_case.mstore, test_case.pinger) + super().set_up(test_case) + test_case.broker = BrokerServer( + test_case.config, + test_case.reactor, + test_case.exchanger, + test_case.handler, + test_case.mstore, + test_case.pinger, + ) class RemoteBrokerHelper(BrokerServerHelper): @@ -168,13 +199,17 @@ """ def set_up(self, test_case): - super(RemoteBrokerHelper, self).set_up(test_case) + super().set_up(test_case) - self._publisher = ComponentPublisher(test_case.broker, - 
test_case.reactor, - test_case.config) - self._connector = RemoteBrokerConnector(test_case.reactor, - test_case.config) + self._publisher = ComponentPublisher( + test_case.broker, + test_case.reactor, + test_case.config, + ) + self._connector = RemoteBrokerConnector( + test_case.reactor, + test_case.config, + ) self._publisher.start() deferred = self._connector.connect() @@ -183,7 +218,7 @@ def tear_down(self, test_case): self._connector.disconnect() self._publisher.stop() - super(RemoteBrokerHelper, self).tear_down(test_case) + super().tear_down(test_case) class BrokerClientHelper(RemoteBrokerHelper): @@ -204,7 +239,7 @@ """ def set_up(self, test_case): - super(BrokerClientHelper, self).set_up(test_case) + super().set_up(test_case) # The client needs its own reactor to avoid infinite loops # when the broker broadcasts and event test_case.client_reactor = FakeReactor() @@ -227,10 +262,12 @@ """ def set_up(self, test_case): - super(RemoteClientHelper, self).set_up(test_case) - self._client_publisher = ComponentPublisher(test_case.client, - test_case.reactor, - test_case.config) + super().set_up(test_case) + self._client_publisher = ComponentPublisher( + test_case.client, + test_case.reactor, + test_case.config, + ) self._client_publisher.start() test_case.remote.register_client("client") test_case.remote_client = test_case.broker.get_client("client") @@ -239,4 +276,4 @@ def tear_down(self, test_case): self._client_connector.disconnect() self._client_publisher.stop() - super(RemoteClientHelper, self).tear_down(test_case) + super().tear_down(test_case) diff -Nru landscape-client-23.02/landscape/client/broker/tests/test_amp.py landscape-client-23.08/landscape/client/broker/tests/test_amp.py --- landscape-client-23.02/landscape/client/broker/tests/test_amp.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/tests/test_amp.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,10 +1,10 @@ -import mock +from unittest import mock +from landscape.client.broker.tests.helpers import RemoteBrokerHelper +from landscape.client.broker.tests.helpers import RemoteClientHelper +from landscape.client.tests.helpers import DEFAULT_ACCEPTED_TYPES +from landscape.client.tests.helpers import LandscapeTest from landscape.lib.amp import MethodCallError -from landscape.client.tests.helpers import ( - LandscapeTest, DEFAULT_ACCEPTED_TYPES) -from landscape.client.broker.tests.helpers import ( - RemoteBrokerHelper, RemoteClientHelper) class RemoteBrokerTest(LandscapeTest): @@ -41,13 +41,13 @@ session_id = self.successResultOf(self.remote.get_session_id()) message_id = self.successResultOf( - self.remote.send_message(message, session_id)) + self.remote.send_message(message, session_id), + ) self.assertTrue(isinstance(message_id, int)) self.assertTrue(self.mstore.is_pending(message_id)) self.assertFalse(self.exchanger.is_urgent()) - self.assertMessages(self.mstore.get_pending_messages(), - [message]) + self.assertMessages(self.mstore.get_pending_messages(), [message]) def test_send_message_with_urgent(self): """ @@ -56,8 +56,9 @@ message = {"type": "test"} self.mstore.set_accepted_types(["test"]) session_id = self.successResultOf(self.remote.get_session_id()) - message_id = self.successResultOf(self.remote.send_message( - message, session_id, urgent=True)) + message_id = self.successResultOf( + self.remote.send_message(message, session_id, urgent=True), + ) self.assertTrue(isinstance(message_id, int)) self.assertTrue(self.exchanger.is_urgent()) @@ -95,8 +96,9 @@ a L{Deferred}. 
""" # This should make the registration succeed - self.transport.responses.append([{"type": "set-id", "id": "abc", - "insecure-id": "def"}]) + self.transport.responses.append( + [{"type": "set-id", "id": "abc", "insecure-id": "def"}], + ) result = self.remote.register() return self.assertSuccess(result, None) @@ -130,7 +132,8 @@ self.assertEqual(response, None) self.assertEqual( self.exchanger.get_client_accepted_message_types(), - sorted(["type"] + DEFAULT_ACCEPTED_TYPES)) + sorted(["type"] + DEFAULT_ACCEPTED_TYPES), + ) result = self.remote.register_client_accepted_message_type("type") return result.addCallback(assert_response) @@ -159,8 +162,11 @@ The L{RemoteBroker.call_if_accepted} method doesn't do anything if the given message type is not accepted. """ - function = (lambda: 1 / 0) - result = self.remote.call_if_accepted("test", function) + + def division_by_zero(): + raise ZeroDivisionError() + + result = self.remote.call_if_accepted("test", division_by_zero) return self.assertSuccess(result, None) def test_listen_events(self): @@ -181,8 +187,9 @@ """ callback1 = mock.Mock() callback2 = mock.Mock(return_value=123) - deferred = self.remote.call_on_event({"event1": callback1, - "event2": callback2}) + deferred = self.remote.call_on_event( + {"event1": callback1, "event2": callback2}, + ) self.reactor.call_later(0.05, self.reactor.fire, "event2") self.reactor.advance(0.05) self.remote._factory.fake_connection.flush() @@ -228,8 +235,10 @@ a L{Deferred}. """ handler = mock.Mock() - with mock.patch.object(self.client.broker, - "register_client_accepted_message_type") as m: + with mock.patch.object( + self.client.broker, + "register_client_accepted_message_type", + ) as m: # We need to register a test message handler to let the dispatch # message method call succeed self.client.register_message("test", handler) diff -Nru landscape-client-23.02/landscape/client/broker/tests/test_client.py landscape-client-23.08/landscape/client/broker/tests/test_client.py --- landscape-client-23.02/landscape/client/broker/tests/test_client.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/tests/test_client.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,14 +1,14 @@ -import mock +from unittest import mock from twisted.internet import reactor from twisted.internet.defer import Deferred -from landscape.lib.twisted_util import gather_results -from landscape.client.tests.helpers import ( - LandscapeTest, DEFAULT_ACCEPTED_TYPES) +from landscape.client.broker.client import BrokerClientPlugin +from landscape.client.broker.client import HandlerNotFoundError from landscape.client.broker.tests.helpers import BrokerClientHelper -from landscape.client.broker.client import ( - BrokerClientPlugin, HandlerNotFoundError) +from landscape.client.tests.helpers import DEFAULT_ACCEPTED_TYPES +from landscape.client.tests.helpers import LandscapeTest +from landscape.lib.twisted_util import gather_results class BrokerClientTest(LandscapeTest): @@ -45,7 +45,8 @@ getting a session id. """ test_session_id = self.successResultOf( - self.client.broker.get_session_id(scope="test")) + self.client.broker.get_session_id(scope="test"), + ) plugin = BrokerClientPlugin() plugin.scope = "test" self.client.add(plugin) @@ -130,8 +131,10 @@ If a plugin has a run method, the reactor will call it every run_interval, but will stop and log if it raises unhandled exceptions. 
""" + class RunFailure(Exception): pass + # log helper should not complain on the error we're testing self.log_helper.ignore_errors("BrokerClientPlugin.*") plugin = BrokerClientPlugin() @@ -146,7 +149,8 @@ # message entry that would be present on a live client. self.assertIn( "ERROR: BrokerClientPlugin raised an uncaught exception", - self.logfile.getvalue()) + self.logfile.getvalue(), + ) def test_run_interval_blocked_during_resynch(self): """ @@ -220,7 +224,8 @@ def got_result(result): self.assertEqual( self.exchanger.get_client_accepted_message_types(), - sorted(["bar", "foo"] + DEFAULT_ACCEPTED_TYPES)) + sorted(["bar", "foo"] + DEFAULT_ACCEPTED_TYPES), + ) return gather_results([result1, result2]).addCallback(got_result) @@ -251,8 +256,10 @@ def dispatch_message(result): self.assertIs(self.client.dispatch_message(message), None) - self.assertTrue("Error running message handler for type 'foo'" in - self.logfile.getvalue()) + self.assertTrue( + "Error running message handler for type 'foo'" + in self.logfile.getvalue(), + ) handle_message.assert_called_once_with(message) result = self.client.register_message("foo", handle_message) @@ -263,8 +270,11 @@ L{BrokerClient.dispatch_message} raises an error if no handler was found for the given message. """ - error = self.assertRaises(HandlerNotFoundError, - self.client.dispatch_message, {"type": "x"}) + error = self.assertRaises( + HandlerNotFoundError, + self.client.dispatch_message, + {"type": "x"}, + ) self.assertEqual(str(error), "x") def test_message(self): @@ -326,8 +336,9 @@ self.client.add(plugin1) self.client.add(plugin2) self.client.exchange() - self.assertTrue("Error during plugin exchange" in - self.logfile.getvalue()) + self.assertTrue( + "Error during plugin exchange" in self.logfile.getvalue(), + ) self.assertTrue("ZeroDivisionError" in self.logfile.getvalue()) plugin1.exchange.assert_called_once_with() plugin2.exchange.assert_called_once_with() @@ -342,8 +353,10 @@ plugin.exchange = mock.Mock() self.client.add(plugin) self.client_reactor.fire("impending-exchange") - self.assertTrue("Got notification of impending exchange. " - "Notifying all plugins." in self.logfile.getvalue()) + self.assertTrue( + "Got notification of impending exchange. " + "Notifying all plugins." 
in self.logfile.getvalue(), + ) plugin.exchange.assert_called_once_with() def test_fire_event(self): @@ -415,7 +428,9 @@ calls = [mock.call("bar"), mock.call("foo")] broker.register_client_accepted_message_type.assert_has_calls( - calls, any_order=True) + calls, + any_order=True, + ) broker.register_client.assert_called_once_with("client") return gather_results([result1, result2]).addCallback(got_result) diff -Nru landscape-client-23.02/landscape/client/broker/tests/test_config.py landscape-client-23.08/landscape/client/broker/tests/test_config.py --- landscape-client-23.02/landscape/client/broker/tests/test_config.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/tests/test_config.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,8 @@ import os from landscape.client.broker.config import BrokerConfiguration -from landscape.lib.testing import EnvironSaverHelper from landscape.client.tests.helpers import LandscapeTest +from landscape.lib.testing import EnvironSaverHelper class ConfigurationTests(LandscapeTest): @@ -20,9 +20,16 @@ del os.environ["https_proxy"] configuration = BrokerConfiguration() - configuration.load(["--http-proxy", "foo", - "--https-proxy", "bar", - "--url", "whatever"]) + configuration.load( + [ + "--http-proxy", + "foo", + "--https-proxy", + "bar", + "--url", + "whatever", + ], + ) self.assertEqual(os.environ["http_proxy"], "foo") self.assertEqual(os.environ["https_proxy"], "bar") @@ -53,9 +60,9 @@ os.environ["https_proxy"] = "originals" configuration = BrokerConfiguration() - configuration.load(["--http-proxy", "x", - "--https-proxy", "y", - "--url", "whatever"]) + configuration.load( + ["--http-proxy", "x", "--https-proxy", "y", "--url", "whatever"], + ) self.assertEqual(os.environ["http_proxy"], "x") self.assertEqual(os.environ["https_proxy"], "y") @@ -74,10 +81,12 @@ The 'urgent_exchange_interval', 'exchange_interval' and 'ping_interval' values specified in the configuration file are converted to integers. """ - filename = self.makeFile("[client]\n" - "urgent_exchange_interval = 12\n" - "exchange_interval = 34\n" - "ping_interval = 6\n") + filename = self.makeFile( + "[client]\n" + "urgent_exchange_interval = 12\n" + "exchange_interval = 34\n" + "ping_interval = 6\n", + ) configuration = BrokerConfiguration() configuration.load(["--config", filename, "--url", "whatever"]) @@ -91,8 +100,9 @@ The 'tags' value specified in the configuration file is not converted to a list (it must be a string). See bug #1228301. """ - filename = self.makeFile("[client]\n" - "tags = check,linode,profile-test") + filename = self.makeFile( + "[client]\ntags = check,linode,profile-test", + ) configuration = BrokerConfiguration() configuration.load(["--config", filename, "--url", "whatever"]) @@ -104,8 +114,7 @@ The 'access_group' value specified in the configuration file is passed through.
""" - filename = self.makeFile("[client]\n" - "access_group = webserver") + filename = self.makeFile("[client]\naccess_group = webserver") configuration = BrokerConfiguration() configuration.load(["--config", filename, "--url", "whatever"]) @@ -122,5 +131,7 @@ configuration = BrokerConfiguration() configuration.load(["--config", filename]) - self.assertEqual(configuration.url, - "https://landscape.canonical.com/message-system") + self.assertEqual( + configuration.url, + "https://landscape.canonical.com/message-system", + ) diff -Nru landscape-client-23.02/landscape/client/broker/tests/test_exchange.py landscape-client-23.08/landscape/client/broker/tests/test_exchange.py --- landscape-client-23.02/landscape/client/broker/tests/test_exchange.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/tests/test_exchange.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,22 +1,23 @@ -import mock +from unittest import mock from landscape import CLIENT_API -from landscape.lib.persist import Persist -from landscape.lib.fetch import HTTPCodeError, PyCurlError -from landscape.lib.hashlib import md5 -from landscape.lib.schema import Int -from landscape.message_schemas.message import Message from landscape.client.broker.config import BrokerConfiguration -from landscape.client.broker.exchange import ( - get_accepted_types_diff, MessageExchange) -from landscape.client.broker.transport import FakeTransport -from landscape.client.broker.store import MessageStore +from landscape.client.broker.exchange import get_accepted_types_diff +from landscape.client.broker.exchange import MessageExchange from landscape.client.broker.ping import Pinger from landscape.client.broker.registration import RegistrationHandler -from landscape.client.tests.helpers import ( - LandscapeTest, DEFAULT_ACCEPTED_TYPES) -from landscape.client.broker.tests.helpers import ExchangeHelper from landscape.client.broker.server import BrokerServer +from landscape.client.broker.store import MessageStore +from landscape.client.broker.tests.helpers import ExchangeHelper +from landscape.client.broker.transport import FakeTransport +from landscape.client.tests.helpers import DEFAULT_ACCEPTED_TYPES +from landscape.client.tests.helpers import LandscapeTest +from landscape.lib.fetch import HTTPCodeError +from landscape.lib.fetch import PyCurlError +from landscape.lib.hashlib import md5 +from landscape.lib.persist import Persist +from landscape.lib.schema import Int +from landscape.message_schemas.message import Message class MessageExchangeTest(LandscapeTest): @@ -24,11 +25,11 @@ helpers = [ExchangeHelper] def setUp(self): - super(MessageExchangeTest, self).setUp() + super().setUp() self.mstore.add_schema(Message("empty", {})) self.mstore.add_schema(Message("data", {"data": Int()})) self.mstore.add_schema(Message("holdme", {})) - self.identity.secure_id = 'needs-to-be-set-for-tests-to-pass' + self.identity.secure_id = "needs-to-be-set-for-tests-to-pass" def wait_for_exchange(self, urgent=False, factor=1, delta=0): if urgent: @@ -52,9 +53,14 @@ session IDs are expired, so any new messages being sent with those IDs will be discarded. 
""" - broker = BrokerServer(self.config, self.reactor, - self.exchanger, None, - self.mstore, None) + broker = BrokerServer( + self.config, + self.reactor, + self.exchanger, + None, + self.mstore, + None, + ) disk_session_id = self.mstore.get_session_id(scope="disk") package_session_id = self.mstore.get_session_id(scope="package") @@ -72,9 +78,14 @@ When a resynchronisation event occurs with a scope existing session IDs for that scope are expired, all other session IDs are unaffected. """ - broker = BrokerServer(self.config, self.reactor, - self.exchanger, None, - self.mstore, None) + broker = BrokerServer( + self.config, + self.reactor, + self.exchanger, + None, + self.mstore, + None, + ) disk_session_id = self.mstore.get_session_id(scope="disk") package_session_id = self.mstore.get_session_id(scope="package") @@ -105,9 +116,10 @@ self.exchanger.exchange() self.assertEqual(len(self.transport.payloads), 1) messages = self.transport.payloads[0]["messages"] - self.assertEqual(messages, [{"type": "empty", - "timestamp": 0, - "api": b"3.2"}]) + self.assertEqual( + messages, + [{"type": "empty", "timestamp": 0, "api": b"3.2"}], + ) def test_send_urgent(self): """ @@ -118,8 +130,10 @@ self.exchanger.send({"type": "empty"}, urgent=True) self.wait_for_exchange(urgent=True) self.assertEqual(len(self.transport.payloads), 1) - self.assertMessages(self.transport.payloads[0]["messages"], - [{"type": "empty"}]) + self.assertMessages( + self.transport.payloads[0]["messages"], + [{"type": "empty"}], + ) def test_send_urgent_wont_reschedule(self): """ @@ -132,8 +146,10 @@ self.exchanger.send({"type": "empty"}, urgent=True) self.wait_for_exchange(urgent=True, factor=0.5) self.assertEqual(len(self.transport.payloads), 1) - self.assertMessages(self.transport.payloads[0]["messages"], - [{"type": "empty"}, {"type": "empty"}]) + self.assertMessages( + self.transport.payloads[0]["messages"], + [{"type": "empty"}, {"type": "empty"}], + ) def test_send_returns_message_id(self): """ @@ -151,14 +167,15 @@ """ self.mstore.set_accepted_types(["package-reporter-result"]) self.exchanger._max_log_text_bytes = 5 - self.exchanger.send({"type": "package-reporter-result", "err": "E"*10, - "code": 0}) + self.exchanger.send( + {"type": "package-reporter-result", "err": "E" * 10, "code": 0}, + ) self.exchanger.exchange() self.assertEqual(len(self.transport.payloads), 1) messages = self.transport.payloads[0]["messages"] - self.assertIn('TRUNCATED', messages[0]['err']) - self.assertIn('EEEEE', messages[0]['err']) - self.assertNotIn('EEEEEE', messages[0]['err']) + self.assertIn("TRUNCATED", messages[0]["err"]) + self.assertIn("EEEEE", messages[0]["err"]) + self.assertNotIn("EEEEEE", messages[0]["err"]) def test_send_big_message_trimmed_result(self): """ @@ -166,14 +183,21 @@ """ self.mstore.set_accepted_types(["operation-result"]) self.exchanger._max_log_text_bytes = 5 - self.exchanger.send({"type": "operation-result", "result-text": "E"*10, - "code": 0, "status": 0, "operation-id": 0}) + self.exchanger.send( + { + "type": "operation-result", + "result-text": "E" * 10, + "code": 0, + "status": 0, + "operation-id": 0, + }, + ) self.exchanger.exchange() self.assertEqual(len(self.transport.payloads), 1) messages = self.transport.payloads[0]["messages"] - self.assertIn('TRUNCATED', messages[0]['result-text']) - self.assertIn('EEEEE', messages[0]['result-text']) - self.assertNotIn('EEEEEE', messages[0]['result-text']) + self.assertIn("TRUNCATED", messages[0]["result-text"]) + self.assertIn("EEEEE", messages[0]["result-text"]) + 
self.assertNotIn("EEEEEE", messages[0]["result-text"]) def test_send_small_message_not_trimmed(self): """ @@ -181,13 +205,14 @@ """ self.mstore.set_accepted_types(["package-reporter-result"]) self.exchanger._max_log_text_bytes = 4 - self.exchanger.send({"type": "package-reporter-result", "err": "E"*4, - "code": 0}) + self.exchanger.send( + {"type": "package-reporter-result", "err": "E" * 4, "code": 0}, + ) self.exchanger.exchange() self.assertEqual(len(self.transport.payloads), 1) messages = self.transport.payloads[0]["messages"] - self.assertNotIn('TRUNCATED', messages[0]['err']) - self.assertIn('EEEE', messages[0]['err']) + self.assertNotIn("TRUNCATED", messages[0]["err"]) + self.assertIn("EEEE", messages[0]["err"]) def test_wb_include_accepted_types(self): """ @@ -204,7 +229,8 @@ types. """ self.exchanger.handle_message( - {"type": "accepted-types", "types": ["foo"]}) + {"type": "accepted-types", "types": ["foo"]}, + ) self.assertEqual(self.mstore.get_accepted_types(), ["foo"]) def test_message_type_acceptance_changed_event(self): @@ -212,13 +238,18 @@ def callback(type, accepted): stash.append((type, accepted)) + self.reactor.call_on("message-type-acceptance-changed", callback) self.exchanger.handle_message( - {"type": "accepted-types", "types": ["a", "b"]}) + {"type": "accepted-types", "types": ["a", "b"]}, + ) self.exchanger.handle_message( - {"type": "accepted-types", "types": ["b", "c"]}) - self.assertCountEqual(stash, [("a", True), ("b", True), - ("a", False), ("c", True)]) + {"type": "accepted-types", "types": ["b", "c"]}, + ) + self.assertCountEqual( + stash, + [("a", True), ("b", True), ("a", False), ("c", True)], + ) def test_wb_accepted_types_roundtrip(self): """ @@ -226,11 +257,11 @@ should affect its future payloads. """ self.exchanger.handle_message( - {"type": "accepted-types", "types": ["ack", "bar"]}) + {"type": "accepted-types", "types": ["ack", "bar"]}, + ) payload = self.exchanger._make_payload() self.assertIn("accepted-types", payload) - self.assertEqual(payload["accepted-types"], - md5(b"ack;bar").digest()) + self.assertEqual(payload["accepted-types"], md5(b"ack;bar").digest()) def test_accepted_types_causes_urgent_if_held_messages_exist(self): """ @@ -240,11 +271,14 @@ self.exchanger.send({"type": "holdme"}) self.assertEqual(self.mstore.get_pending_messages(), []) self.exchanger.handle_message( - {"type": "accepted-types", "types": ["holdme"]}) + {"type": "accepted-types", "types": ["holdme"]}, + ) self.wait_for_exchange(urgent=True) self.assertEqual(len(self.transport.payloads), 1) - self.assertMessages(self.transport.payloads[0]["messages"], - [{"type": "holdme"}]) + self.assertMessages( + self.transport.payloads[0]["messages"], + [{"type": "holdme"}], + ) def test_accepted_types_no_urgent_without_held(self): """ @@ -253,8 +287,10 @@ """ self.exchanger.send({"type": "holdme"}) self.assertEqual(self.transport.payloads, []) - self.reactor.fire("message", - {"type": "accepted-types", "types": ["irrelevant"]}) + self.reactor.fire( + "message", + {"type": "accepted-types", "types": ["irrelevant"]}, + ) self.assertEqual(len(self.transport.payloads), 0) def test_sequence_is_committed_immediately(self): @@ -294,8 +330,7 @@ def handler(message): Persist(filename=self.persist_filename) store = MessageStore(self.persist, self.config.message_store_path) - self.assertEqual(store.get_server_sequence(), - self.message_counter) + self.assertEqual(store.get_server_sequence(), self.message_counter) self.message_counter += 1 handled.append(True) @@ -324,8 +359,10 @@ 
self.wait_for_exchange(urgent=True) self.assertEqual(len(self.transport.payloads), 2) - self.assertMessages(self.transport.payloads[1]["messages"], - [{"type": "empty"}]) + self.assertMessages( + self.transport.payloads[1]["messages"], + [{"type": "empty"}], + ) def test_server_expects_older_messages(self): """ @@ -361,22 +398,29 @@ self.assertEqual(exchanged, [True]) payload = self.transport.payloads[-1] - self.assertMessages(payload["messages"], - [{"type": "data", "data": 1}, - {"type": "data", "data": 2}, - {"type": "data", "data": 3}]) + self.assertMessages( + payload["messages"], + [ + {"type": "data", "data": 1}, + {"type": "data", "data": 2}, + {"type": "data", "data": 3}, + ], + ) self.assertEqual(payload["sequence"], 1) self.assertEqual(payload["next-expected-sequence"], 0) - @mock.patch("landscape.client.broker.store.MessageStore" - ".delete_old_messages") - def test_pending_offset_when_next_expected_too_high(self, - mock_rm_all_messages): - ''' + @mock.patch( + "landscape.client.broker.store.MessageStore.delete_old_messages", + ) + def test_pending_offset_when_next_expected_too_high( + self, + mock_rm_all_messages, + ): + """ When next expected sequence received from server is too high, then the pending offset should reset to zero. This will cause the client to resend the pending messages. - ''' + """ self.mstore.set_accepted_types(["data"]) self.mstore.add({"type": "data", "data": 0}) @@ -399,12 +443,12 @@ self.assertTrue(mock_rm_all_messages.called) def test_payloads_when_next_expected_too_high(self): - ''' + """ When next expected sequence received from server is too high, then the current messages should get sent again since we don't have confirmation that the server received it. Also previous messages should not get repeated. - ''' + """ self.mstore.set_accepted_types(["data"]) @@ -426,17 +470,16 @@ self.assertTrue(last_messages) # Confirm earlier messages are not resent - self.assertNotIn(message0["data"], - [m["data"] for m in last_messages]) + self.assertNotIn(message0["data"], [m["data"] for m in last_messages]) # Confirm contents of payload self.assertEqual([message1, message2], last_messages) def test_resync_when_next_expected_too_high(self): - ''' + """ When next expected sequence received from the server is too high, then a resynchronize should happen - ''' + """ self.mstore.set_accepted_types(["empty", "resynchronize"]) self.mstore.add({"type": "empty"}) @@ -447,17 +490,24 @@ self.reactor.call_on("resynchronize-clients", lambda scope=None: None) self.exchanger.exchange() - self.assertMessage(self.mstore.get_pending_messages()[-1], - {"type": "resynchronize"}) + self.assertMessage( + self.mstore.get_pending_messages()[-1], + {"type": "resynchronize"}, + ) def test_start_with_urgent_exchange(self): """ Immediately after registration, an urgent exchange should be scheduled. 
""" transport = FakeTransport() - exchanger = MessageExchange(self.reactor, self.mstore, transport, - self.identity, self.exchange_store, - self.config) + exchanger = MessageExchange( + self.reactor, + self.mstore, + transport, + self.identity, + self.exchange_store, + self.config, + ) exchanger.start() self.wait_for_exchange(urgent=True) self.assertEqual(len(transport.payloads), 1) @@ -499,13 +549,17 @@ self.mstore.record_success = mock_record_success exchanger = MessageExchange( - self.reactor, self.mstore, self.transport, - self.identity, self.exchange_store, self.config) + self.reactor, + self.mstore, + self.transport, + self.identity, + self.exchange_store, + self.config, + ) exchanger.exchange() mock_record_success.assert_called_with(mock.ANY) - self.assertTrue( - type(mock_record_success.call_args[0][0]) is int) + self.assertTrue(type(mock_record_success.call_args[0][0]) is int) def test_ancient_causes_resynchronize(self): """ @@ -530,15 +584,20 @@ # should come AFTER the "resynchronize" message that is generated # by the exchange code itself. self.mstore.add({"type": "data", "data": 999}) + self.reactor.call_on("resynchronize-clients", resynchronize) # This exchange call will notice the server is asking for an old # message and fire the event: self.exchanger.exchange() - self.assertMessages(self.mstore.get_pending_messages(), - [{"type": "empty"}, - {"type": "resynchronize"}, - {"type": "data", "data": 999}]) + self.assertMessages( + self.mstore.get_pending_messages(), + [ + {"type": "empty"}, + {"type": "resynchronize"}, + {"type": "data", "data": 999}, + ], + ) def test_resynchronize_msg_causes_resynchronize_response_then_event(self): """ @@ -551,15 +610,20 @@ def resynchronized(scopes=None): self.mstore.add({"type": "empty"}) + self.reactor.call_on("resynchronize-clients", resynchronized) - self.transport.responses.append([{"type": "resynchronize", - "operation-id": 123}]) - self.exchanger.exchange() - self.assertMessages(self.mstore.get_pending_messages(), - [{"type": "resynchronize", - "operation-id": 123}, - {"type": "empty"}]) + self.transport.responses.append( + [{"type": "resynchronize", "operation-id": 123}], + ) + self.exchanger.exchange() + self.assertMessages( + self.mstore.get_pending_messages(), + [ + {"type": "resynchronize", "operation-id": 123}, + {"type": "empty"}, + ], + ) def test_scopes_are_copied_from_incoming_resynchronize_messages(self): """ @@ -574,9 +638,15 @@ self.reactor.call_on("resynchronize-clients", resynchronized) - self.transport.responses.append([{"type": "resynchronize", - "operation-id": 123, - "scopes": ["disk", "users"]}]) + self.transport.responses.append( + [ + { + "type": "resynchronize", + "operation-id": 123, + "scopes": ["disk", "users"], + }, + ], + ) self.exchanger.exchange() self.assertEqual(["disk", "users"], fired_scopes) @@ -622,8 +692,10 @@ def test_old_sequence_id_does_not_cause_resynchronize(self): resynchronized = [] - self.reactor.call_on("resynchronize", - lambda: resynchronized.append(True)) + self.reactor.call_on( + "resynchronize", + lambda: resynchronized.append(True), + ) self.mstore.set_accepted_types(["empty"]) self.mstore.add({"type": "empty"}) @@ -663,9 +735,10 @@ self.exchanger.exchange() payload = self.transport.payloads[-1] - self.assertMessages(payload["messages"], - [{"type": "a", "api": b"1.0"}, - {"type": "b", "api": b"1.0"}]) + self.assertMessages( + payload["messages"], + [{"type": "a", "api": b"1.0"}, {"type": "b", "api": b"1.0"}], + ) self.assertEqual(payload.get("client-api"), CLIENT_API) 
self.assertEqual(payload.get("server-api"), b"1.0") self.assertEqual(self.transport.message_api, b"1.0") @@ -673,9 +746,10 @@ self.exchanger.exchange() payload = self.transport.payloads[-1] - self.assertMessages(payload["messages"], - [{"type": "c", "api": b"1.1"}, - {"type": "d", "api": b"1.1"}]) + self.assertMessages( + payload["messages"], + [{"type": "c", "api": b"1.1"}, {"type": "d", "api": b"1.1"}], + ) self.assertEqual(payload.get("client-api"), CLIENT_API) self.assertEqual(payload.get("server-api"), b"1.1") self.assertEqual(self.transport.message_api, b"1.1") @@ -732,9 +806,15 @@ the total-messages is equivalent to the total number of messages pending. """ - exchanger = MessageExchange(self.reactor, self.mstore, self.transport, - self.identity, self.exchange_store, - self.config, max_messages=1) + exchanger = MessageExchange( + self.reactor, + self.mstore, + self.transport, + self.identity, + self.exchange_store, + self.config, + max_messages=1, + ) self.mstore.set_accepted_types(["empty"]) self.mstore.add({"type": "empty"}) self.mstore.add({"type": "empty"}) @@ -763,9 +843,14 @@ # fixture has an urgent exchange interval of 10 seconds, which makes # testing this awkward. self.config.urgent_exchange_interval = 20 - exchanger = MessageExchange(self.reactor, self.mstore, self.transport, - self.identity, self.exchange_store, - self.config) + exchanger = MessageExchange( + self.reactor, + self.mstore, + self.transport, + self.identity, + self.exchange_store, + self.config, + ) exchanger.schedule_exchange(urgent=True) events = [] self.reactor.call_on("impending-exchange", lambda: events.append(True)) @@ -783,9 +868,14 @@ """ self.config.exchange_interval = 60 * 60 self.config.urgent_exchange_interval = 20 - exchanger = MessageExchange(self.reactor, self.mstore, self.transport, - self.identity, self.exchange_store, - self.config) + exchanger = MessageExchange( + self.reactor, + self.mstore, + self.transport, + self.identity, + self.exchange_store, + self.config, + ) events = [] self.reactor.call_on("impending-exchange", lambda: events.append(True)) # This call will: @@ -806,11 +896,12 @@ # schedule a regular exchange. # Let's make sure that that *original* impending-exchange event has # been cancelled: - TIME_UNTIL_EXCHANGE = 60 * 60 - TIME_UNTIL_NOTIFY = 10 - TIME_ADVANCED = 20 # time that we've already advanced - self.reactor.advance(TIME_UNTIL_EXCHANGE - - (TIME_UNTIL_NOTIFY + TIME_ADVANCED)) + time_until_exchange = 60 * 60 + time_until_notify = 10 + time_advanced = 20 + self.reactor.advance( + time_until_exchange - (time_until_notify + time_advanced), + ) self.assertEqual(events, [True]) # Ok, so no new events means that the original call was # cancelled. great. @@ -877,8 +968,13 @@ the L{MessageExchange} are changed, the configuration values as well, and the configuration is written to disk to be persisted. 
""" - server_message = [{"type": "set-intervals", - "urgent-exchange": 1234, "exchange": 5678}] + server_message = [ + { + "type": "set-intervals", + "urgent-exchange": 1234, + "exchange": 5678, + }, + ] self.transport.responses.append(server_message) self.exchanger.exchange() @@ -993,6 +1089,7 @@ def server_uuid_changed(old_uuid, new_uuid): called.append((old_uuid, new_uuid)) + self.reactor.call_on("server-uuid-changed", server_uuid_changed) # Set it for the first time, and it should emit the event @@ -1028,6 +1125,7 @@ def server_uuid_changed(old_uuid, new_uuid): called.append((old_uuid, new_uuid)) + self.reactor.call_on("server-uuid-changed", server_uuid_changed) self.mstore.set_server_uuid("the-uuid") @@ -1039,8 +1137,10 @@ self.transport.extra["server-uuid"] = "the-uuid" self.exchanger.exchange() - self.assertIn("INFO: Server UUID changed (old=None, new=the-uuid).", - self.logfile.getvalue()) + self.assertIn( + "INFO: Server UUID changed (old=None, new=the-uuid).", + self.logfile.getvalue(), + ) # An exchange with the same UUID shouldn't be logged. self.logfile.truncate(0) @@ -1063,9 +1163,11 @@ [message] = messages self.assertIsNot( None, - self.exchange_store.get_message_context(message['operation-id'])) + self.exchange_store.get_message_context(message["operation-id"]), + ) message_context = self.exchange_store.get_message_context( - message['operation-id']) + message["operation-id"], + ) self.assertEqual(message_context.operation_id, 123456) self.assertEqual(message_context.message_type, "type-R") @@ -1099,12 +1201,13 @@ self.exchanger.exchange() # Change the secure ID so that the response message gets discarded. - self.identity.secure_id = 'brand-new' + self.identity.secure_id = "brand-new" ids_before = self.exchange_store.all_operation_ids() self.mstore.set_accepted_types(["resynchronize"]) message_id = self.exchanger.send( - {"type": "resynchronize", "operation-id": 234567}) + {"type": "resynchronize", "operation-id": 234567}, + ) self.exchanger.exchange() self.assertEqual(2, len(self.transport.payloads)) messages = self.transport.payloads[1]["messages"] @@ -1112,13 +1215,14 @@ self.assertIs(None, message_id) expected_log_entry = ( "Response message with operation-id 234567 was discarded because " - "the client's secure ID has changed in the meantime") + "the client's secure ID has changed in the meantime" + ) self.assertIn(expected_log_entry, self.logfile.getvalue()) # The MessageContext was removed after utilisation. ids_after = self.exchange_store.all_operation_ids() self.assertEqual(len(ids_after), len(ids_before) - 1) - self.assertNotIn('234567', ids_after) + self.assertNotIn("234567", ids_after) def test_error_exchanging_causes_failed_exchange(self): """ @@ -1135,13 +1239,14 @@ self.exchanger.exchange() self.assertEqual([None], events) - def test_SSL_error_exchanging_causes_failed_exchange(self): + def test_SSL_error_exchanging_causes_failed_exchange(self): # noqa: N802 """ If an SSL error occurs when exchanging, the 'exchange-failed' event should be fired with the optional "ssl_error" flag set to True. 
""" - self.log_helper.ignore_errors("Message exchange failed: Failed to " - "communicate.") + self.log_helper.ignore_errors( + "Message exchange failed: Failed to communicate.", + ) events = [] def failed_exchange(ssl_error): @@ -1149,8 +1254,9 @@ events.append(None) self.reactor.call_on("exchange-failed", failed_exchange) - self.transport.responses.append(PyCurlError(60, - "Failed to communicate.")) + self.transport.responses.append( + PyCurlError(60, "Failed to communicate."), + ) self.exchanger.exchange() self.assertEqual([None], events) @@ -1167,8 +1273,9 @@ events.append(None) self.reactor.call_on("exchange-failed", failed_exchange) - self.transport.responses.append(PyCurlError(10, # Not 60 - "Failed to communicate.")) + self.transport.responses.append( + PyCurlError(10, "Failed to communicate."), # Not 60 + ) self.exchanger.exchange() self.assertEqual([None], events) @@ -1278,14 +1385,23 @@ helpers = [ExchangeHelper] def setUp(self): - super(AcceptedTypesMessageExchangeTest, self).setUp() - self.pinger = Pinger(self.reactor, self.identity, self.exchanger, - self.config) + super().setUp() + self.pinger = Pinger( + self.reactor, + self.identity, + self.exchanger, + self.config, + ) # The __init__ method of RegistrationHandler registers a few default # message types that we want to catch as well self.handler = RegistrationHandler( - self.config, self.identity, self.reactor, self.exchanger, - self.pinger, self.mstore) + self.config, + self.identity, + self.reactor, + self.exchanger, + self.pinger, + self.mstore, + ) def test_register_accepted_message_type(self): self.exchanger.register_client_accepted_message_type("type-B") @@ -1293,9 +1409,10 @@ self.exchanger.register_client_accepted_message_type("type-C") self.exchanger.register_client_accepted_message_type("type-A") types = self.exchanger.get_client_accepted_message_types() - self.assertEqual(types, - sorted(["type-A", "type-B", "type-C"] + - DEFAULT_ACCEPTED_TYPES)) + self.assertEqual( + types, + sorted(["type-A", "type-B", "type-C"] + DEFAULT_ACCEPTED_TYPES), + ) def test_exchange_sends_message_type_when_no_hash(self): self.exchanger.register_client_accepted_message_type("type-A") @@ -1303,15 +1420,17 @@ self.exchanger.exchange() self.assertEqual( self.transport.payloads[0]["client-accepted-types"], - sorted(["type-A", "type-B"] + DEFAULT_ACCEPTED_TYPES)) + sorted(["type-A", "type-B"] + DEFAULT_ACCEPTED_TYPES), + ) def test_exchange_does_not_send_message_types_when_hash_matches(self): self.exchanger.register_client_accepted_message_type("type-A") self.exchanger.register_client_accepted_message_type("type-B") types = sorted(["type-A", "type-B"] + DEFAULT_ACCEPTED_TYPES) accepted_types_digest = md5(";".join(types).encode("ascii")).digest() - self.transport.extra["client-accepted-types-hash"] = \ - accepted_types_digest + self.transport.extra[ + "client-accepted-types-hash" + ] = accepted_types_digest self.exchanger.exchange() self.exchanger.exchange() self.assertNotIn("client-accepted-types", self.transport.payloads[1]) @@ -1327,7 +1446,8 @@ self.exchanger.exchange() self.assertEqual( self.transport.payloads[1]["client-accepted-types"], - sorted(["type-A", "type-B"] + DEFAULT_ACCEPTED_TYPES)) + sorted(["type-A", "type-B"] + DEFAULT_ACCEPTED_TYPES), + ) def test_exchange_sends_new_accepted_types_hash(self): """ @@ -1342,7 +1462,8 @@ self.exchanger.exchange() self.assertEqual( self.transport.payloads[1]["client-accepted-types"], - sorted(["type-A", "type-B"] + DEFAULT_ACCEPTED_TYPES)) + sorted(["type-A", "type-B"] + 
DEFAULT_ACCEPTED_TYPES), + ) def test_exchange_sends_new_types_when_server_screws_up(self): """ @@ -1359,7 +1480,8 @@ self.exchanger.exchange() self.assertEqual( self.transport.payloads[2]["client-accepted-types"], - sorted(["type-A"] + DEFAULT_ACCEPTED_TYPES)) + sorted(["type-A"] + DEFAULT_ACCEPTED_TYPES), + ) def test_register_message_adds_accepted_type(self): """ @@ -1373,7 +1495,6 @@ class GetAcceptedTypesDiffTest(LandscapeTest): - def test_diff_empty(self): self.assertEqual(get_accepted_types_diff([], []), "") @@ -1387,6 +1508,7 @@ self.assertEqual(get_accepted_types_diff(["ooga"], ["ooga"]), "ooga") def test_diff_complex(self): - self.assertEqual(get_accepted_types_diff(["foo", "bar"], - ["foo", "ooga"]), - "+ooga foo -bar") + self.assertEqual( + get_accepted_types_diff(["foo", "bar"], ["foo", "ooga"]), + "+ooga foo -bar", + ) diff -Nru landscape-client-23.02/landscape/client/broker/tests/test_exchangestore.py landscape-client-23.08/landscape/client/broker/tests/test_exchangestore.py --- landscape-client-23.02/landscape/client/broker/tests/test_exchangestore.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/tests/test_exchangestore.py 2023-08-17 21:19:32.000000000 +0000 @@ -14,7 +14,7 @@ """Unit tests for the C{ExchangeStore}.""" def setUp(self): - super(ExchangeStoreTest, self).setUp() + super().setUp() self.filename = self.makeFile() self.store1 = ExchangeStore(self.filename) @@ -23,19 +23,21 @@ def test_add_message_context(self): """Adding a message context works correctly.""" now = time.time() - self.store1.add_message_context(123, 'abc', 'change-packages') + self.store1.add_message_context(123, "abc", "change-packages") db = sqlite3.connect(self.store2._filename) cursor = db.cursor() cursor.execute( "SELECT operation_id, secure_id, message_type, timestamp " - "FROM message_context WHERE operation_id=?", (123,)) + "FROM message_context WHERE operation_id=?", + (123,), + ) results = cursor.fetchall() self.assertEqual(1, len(results)) [row] = results self.assertEqual(123, row[0]) - self.assertEqual('abc', row[1]) - self.assertEqual('change-packages', row[2]) + self.assertEqual("abc", row[1]) + self.assertEqual("change-packages", row[2]) self.assertTrue(row[3] > now) def test_add_message_context_with_duplicate_operation_id(self): @@ -43,18 +45,22 @@ self.store1.add_message_context(123, "abc", "change-packages") self.assertRaises( (sqlite3.IntegrityError, sqlite3.OperationalError), - self.store1.add_message_context, 123, "def", "change-packages") + self.store1.add_message_context, + 123, + "def", + "change-packages", + ) def test_get_message_context(self): """ Accessing a C{MessageContext} with an existing C{operation-id} works. 
""" now = time.time() - self.store1.add_message_context(234, 'bcd', 'change-packages') + self.store1.add_message_context(234, "bcd", "change-packages") context = self.store2.get_message_context(234) self.assertEqual(234, context.operation_id) - self.assertEqual('bcd', context.secure_id) - self.assertEqual('change-packages', context.message_type) + self.assertEqual("bcd", context.secure_id) + self.assertEqual("change-packages", context.message_type) self.assertTrue(context.timestamp > now) def test_get_message_context_with_nonexistent_operation_id(self): @@ -65,7 +71,10 @@ def test_message_context_remove(self): """C{MessageContext}s are deleted correctly.""" context = self.store1.add_message_context( - 345, 'opq', 'change-packages') + 345, + "opq", + "change-packages", + ) context.remove() self.assertIs(None, self.store1.get_message_context(345)) @@ -78,7 +87,7 @@ def test_all_operation_ids(self): """C{all_operation_ids} works correctly.""" - self.store1.add_message_context(456, 'cde', 'change-packages') + self.store1.add_message_context(456, "cde", "change-packages") self.assertEqual([456], self.store2.all_operation_ids()) - self.store2.add_message_context(567, 'def', 'change-packages') + self.store2.add_message_context(567, "def", "change-packages") self.assertEqual([456, 567], self.store1.all_operation_ids()) diff -Nru landscape-client-23.02/landscape/client/broker/tests/test_ping.py landscape-client-23.08/landscape/client/broker/tests/test_ping.py --- landscape-client-23.02/landscape/client/broker/tests/test_ping.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/tests/test_ping.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,15 +1,15 @@ -from landscape.client.tests.helpers import LandscapeTest - from twisted.internet.defer import fail +from landscape.client.broker.ping import PingClient +from landscape.client.broker.ping import Pinger +from landscape.client.broker.tests.helpers import ExchangeHelper +from landscape.client.tests.helpers import LandscapeTest from landscape.lib import bpickle from landscape.lib.fetch import fetch from landscape.lib.testing import FakeReactor -from landscape.client.broker.ping import PingClient, Pinger -from landscape.client.broker.tests.helpers import ExchangeHelper -class FakePageGetter(object): +class FakePageGetter: """An fake web client.""" def __init__(self, response): @@ -39,9 +39,8 @@ class PingClientTest(LandscapeTest): - def setUp(self): - super(PingClientTest, self).setUp() + super().setUp() self.reactor = FakeReactor() def test_default_get_page(self): @@ -64,8 +63,15 @@ pinger.ping(url, insecure_id) self.assertEqual( client.fetches, - [(url, True, {"Content-Type": "application/x-www-form-urlencoded"}, - "insecure_id=10")]) + [ + ( + url, + True, + {"Content-Type": "application/x-www-form-urlencoded"}, + "insecure_id=10", + ), + ], + ) def test_ping_no_insecure_id(self): """ @@ -100,6 +106,7 @@ def errback(failure): failures.append(failure) + d.addErrback(errback) self.assertEqual(len(failures), 1) self.assertEqual(failures[0].getErrorMessage(), "That's a failure!") @@ -116,7 +123,7 @@ install_exchanger = False def setUp(self): - super(PingerTest, self).setUp() + super().setUp() self.page_getter = FakePageGetter(None) def factory(reactor): @@ -125,21 +132,25 @@ self.config.ping_url = "http://localhost:8081/whatever" self.config.ping_interval = 10 - self.pinger = Pinger(self.reactor, - self.identity, - self.exchanger, - self.config, - ping_client_factory=factory) + self.pinger = Pinger( + self.reactor, + 
self.identity, + self.exchanger, + self.config, + ping_client_factory=factory, + ) def test_default_ping_client(self): """ The C{ping_client_factory} argument to L{Pinger} should be optional, and default to L{PingClient}. """ - pinger = Pinger(self.reactor, - self.identity, - self.exchanger, - self.config) + pinger = Pinger( + self.reactor, + self.identity, + self.exchanger, + self.config, + ) self.assertEqual(pinger.ping_client_factory, PingClient) def test_occasional_ping(self): @@ -195,7 +206,7 @@ self.log_helper.ignore_errors(ZeroDivisionError) self.identity.insecure_id = 42 - class BadPingClient(object): + class BadPingClient: def __init__(self, *args, **kwargs): pass @@ -204,11 +215,13 @@ return fail(ZeroDivisionError("Couldn't fetch page")) self.config.ping_url = "http://foo.com/" - pinger = Pinger(self.reactor, - self.identity, - self.exchanger, - self.config, - ping_client_factory=BadPingClient) + pinger = Pinger( + self.reactor, + self.identity, + self.exchanger, + self.config, + ping_client_factory=BadPingClient, + ) pinger.start() self.reactor.advance(30) @@ -238,8 +251,10 @@ self.assertEqual(len(self.page_getter.fetches), 1) def test_get_url(self): - self.assertEqual(self.pinger.get_url(), - "http://localhost:8081/whatever") + self.assertEqual( + self.pinger.get_url(), + "http://localhost:8081/whatever", + ) def test_config_url(self): """ diff -Nru landscape-client-23.02/landscape/client/broker/tests/test_registration.py landscape-client-23.08/landscape/client/broker/tests/test_registration.py --- landscape-client-23.02/landscape/client/broker/tests/test_registration.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/tests/test_registration.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,14 +1,14 @@ import json import logging import socket -import mock +from unittest import mock -from landscape.lib.compat import _PY3 - -from landscape.client.broker.registration import RegistrationError, Identity +from landscape.client.broker.registration import Identity +from landscape.client.broker.registration import RegistrationError +from landscape.client.broker.tests.helpers import BrokerConfigurationHelper +from landscape.client.broker.tests.helpers import RegistrationHelper from landscape.client.tests.helpers import LandscapeTest -from landscape.client.broker.tests.helpers import ( - BrokerConfigurationHelper, RegistrationHelper) +from landscape.lib.compat import _PY3 from landscape.lib.persist import Persist @@ -17,33 +17,43 @@ helpers = [BrokerConfigurationHelper] def setUp(self): - super(IdentityTest, self).setUp() + super().setUp() self.persist = Persist(filename=self.makePersistFile()) self.identity = Identity(self.config, self.persist) def check_persist_property(self, attr, persist_name): value = "VALUE" - self.assertEqual(getattr(self.identity, attr), None, - "%r attribute should default to None, not %r" % - (attr, getattr(self.identity, attr))) + self.assertEqual( + getattr(self.identity, attr), + None, + f"{attr!r} attribute should default to None, " + f"not {getattr(self.identity, attr)!r}", + ) setattr(self.identity, attr, value) - self.assertEqual(getattr(self.identity, attr), value, - "%r attribute should be %r, not %r" % - (attr, value, getattr(self.identity, attr))) self.assertEqual( - self.persist.get(persist_name), value, - "%r not set to %r in persist" % (persist_name, value)) + getattr(self.identity, attr), + value, + f"{attr!r} attribute should be {value!r}, " + f"not {getattr(self.identity, attr)!r}", + ) + self.assertEqual( + 
self.persist.get(persist_name), + value, + f"{persist_name!r} not set to {value!r} in persist", + ) def check_config_property(self, attr): value = "VALUE" setattr(self.config, attr, value) - self.assertEqual(getattr(self.identity, attr), value, - "%r attribute should be %r, not %r" % - (attr, value, getattr(self.identity, attr))) + self.assertEqual( + getattr(self.identity, attr), + value, + f"{attr!r} attribute should be {value!r}, " + f"not {getattr(self.identity, attr)!r}", + ) def test_secure_id(self): - self.check_persist_property("secure_id", - "registration.secure-id") + self.check_persist_property("secure_id", "registration.secure-id") def test_secure_id_as_unicode(self): """secure-id is expected to be retrieved as unicode.""" @@ -51,8 +61,7 @@ self.assertEqual(self.identity.secure_id, "spam") def test_insecure_id(self): - self.check_persist_property("insecure_id", - "registration.insecure-id") + self.check_persist_property("insecure_id", "registration.insecure-id") def test_computer_title(self): self.check_config_property("computer_title") @@ -75,7 +84,7 @@ helpers = [RegistrationHelper] def setUp(self): - super(RegistrationHandlerTestBase, self).setUp() + super().setUp() logging.getLogger().setLevel(logging.INFO) self.hostname = "ooga.local" self.addCleanup(setattr, socket, "getfqdn", socket.getfqdn) @@ -83,14 +92,14 @@ class RegistrationHandlerTest(RegistrationHandlerTestBase): - def test_server_initiated_id_changing(self): """ The server must be able to ask a client to change its secure and insecure ids even if no requests were sent. """ self.exchanger.handle_message( - {"type": b"set-id", "id": b"abc", "insecure-id": b"def"}) + {"type": b"set-id", "id": b"abc", "insecure-id": b"def"}, + ) self.assertEqual(self.identity.secure_id, "abc") self.assertEqual(self.identity.insecure_id, "def") @@ -101,7 +110,8 @@ """ reactor_fire_mock = self.reactor.fire = mock.Mock() self.exchanger.handle_message( - {"type": b"set-id", "id": b"abc", "insecure-id": b"def"}) + {"type": b"set-id", "id": b"abc", "insecure-id": b"def"}, + ) reactor_fire_mock.assert_any_call("registration-done") def test_unknown_id(self): @@ -120,9 +130,12 @@ self.config.computer_title = "Wu" self.mstore.set_accepted_types(["register"]) self.exchanger.handle_message( - {"type": b"unknown-id", "clone-of": "Wu"}) - self.assertIn("Client is clone of computer Wu", - self.logfile.getvalue()) + {"type": b"unknown-id", "clone-of": "Wu"}, + ) + self.assertIn( + "Client is clone of computer Wu", + self.logfile.getvalue(), + ) def test_clone_secure_id_saved(self): """ @@ -134,7 +147,8 @@ self.config.computer_title = "Wu" self.mstore.set_accepted_types(["register"]) self.exchanger.handle_message( - {"type": b"unknown-id", "clone-of": "Wu"}) + {"type": b"unknown-id", "clone-of": "Wu"}, + ) self.assertEqual(self.handler._clone_secure_id, secure_id) self.assertIsNone(self.identity.secure_id) @@ -148,7 +162,8 @@ self.mstore.set_accepted_types(["register"]) self.mstore.set_server_api(b"3.3") # Note this is only for later api self.exchanger.handle_message( - {"type": b"unknown-id", "clone-of": "Wu"}) + {"type": b"unknown-id", "clone-of": "Wu"}, + ) self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() self.assertEqual(messages[0]["clone_secure_id"], secure_id) @@ -192,9 +207,11 @@ messages = self.mstore.get_pending_messages() self.assertEqual(1, len(messages)) self.assertEqual("register", messages[0]["type"]) - self.assertEqual(self.logfile.getvalue().strip(), - "INFO: Queueing message to register with account " 
- "'account_name' without a password.") + self.assertEqual( + self.logfile.getvalue().strip(), + "INFO: Queueing message to register with account " + "'account_name' without a password.", + ) @mock.patch("landscape.client.broker.registration.get_vm_info") def test_queue_message_on_exchange_with_vm_info(self, get_vm_info_mock): @@ -210,14 +227,18 @@ self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() self.assertEqual(b"vmware", messages[0]["vm-info"]) - self.assertEqual(self.logfile.getvalue().strip(), - "INFO: Queueing message to register with account " - "'account_name' without a password.") + self.assertEqual( + self.logfile.getvalue().strip(), + "INFO: Queueing message to register with account " + "'account_name' without a password.", + ) get_vm_info_mock.assert_called_once_with() @mock.patch("landscape.client.broker.registration.get_container_info") def test_queue_message_on_exchange_with_lxc_container( - self, get_container_info_mock): + self, + get_container_info_mock, + ): """ If the client is running in an LXC container, the information is included in the registration message. @@ -241,9 +262,11 @@ messages = self.mstore.get_pending_messages() password = messages[0]["registration_password"] self.assertEqual("SEKRET", password) - self.assertEqual(self.logfile.getvalue().strip(), - "INFO: Queueing message to register with account " - "'account_name' with a password.") + self.assertEqual( + self.logfile.getvalue().strip(), + "INFO: Queueing message to register with account " + "'account_name' with a password.", + ) def test_queue_message_on_exchange_with_tags(self): """ @@ -254,14 +277,16 @@ self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.config.registration_key = "SEKRET" - self.config.tags = u"computer,tag" + self.config.tags = "computer,tag" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() self.assertEqual("computer,tag", messages[0]["tags"]) - self.assertEqual(self.logfile.getvalue().strip(), - "INFO: Queueing message to register with account " - "'account_name' and tags computer,tag with a " - "password.") + self.assertEqual( + self.logfile.getvalue().strip(), + "INFO: Queueing message to register with account " + "'account_name' and tags computer,tag with a " + "password.", + ) def test_queue_message_on_exchange_with_invalid_tags(self): """ @@ -273,14 +298,16 @@ self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.config.registration_key = "SEKRET" - self.config.tags = u"" + self.config.tags = "" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() self.assertIs(None, messages[0]["tags"]) - self.assertEqual(self.logfile.getvalue().strip(), - "ERROR: Invalid tags provided for registration.\n " - "INFO: Queueing message to register with account " - "'account_name' with a password.") + self.assertEqual( + self.logfile.getvalue().strip(), + "ERROR: Invalid tags provided for registration.\n " + "INFO: Queueing message to register with account " + "'account_name' with a password.", + ) def test_queue_message_on_exchange_with_unicode_tags(self): """ @@ -291,10 +318,10 @@ self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.config.registration_key = "SEKRET" - self.config.tags = u"prova\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}o" + self.config.tags = "prova\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}o" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() - 
expected = u"prova\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}o" + expected = "prova\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}o" self.assertEqual(expected, messages[0]["tags"]) logs = self.logfile.getvalue().strip() @@ -306,10 +333,12 @@ # here, to circumvent that problem. if _PY3: logs = logs.encode("utf-8") - self.assertEqual(logs, - b"INFO: Queueing message to register with account " - b"'account_name' and tags prova\xc4\xb5o " - b"with a password.") + self.assertEqual( + logs, + b"INFO: Queueing message to register with account " + b"'account_name' and tags prova\xc4\xb5o " + b"with a password.", + ) def test_queue_message_on_exchange_with_access_group(self): """ @@ -318,15 +347,17 @@ """ self.mstore.set_accepted_types(["register"]) self.config.account_name = "account_name" - self.config.access_group = u"dinosaurs" - self.config.tags = u"server,london" + self.config.access_group = "dinosaurs" + self.config.tags = "server,london" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() self.assertEqual("dinosaurs", messages[0]["access_group"]) - self.assertEqual(self.logfile.getvalue().strip(), - "INFO: Queueing message to register with account " - "'account_name' in access group 'dinosaurs' and " - "tags server,london without a password.") + self.assertEqual( + self.logfile.getvalue().strip(), + "INFO: Queueing message to register with account " + "'account_name' in access group 'dinosaurs' and " + "tags server,london without a password.", + ) def test_queue_message_on_exchange_with_empty_access_group(self): """ @@ -334,7 +365,7 @@ an "access_group" key. """ self.mstore.set_accepted_types(["register"]) - self.config.access_group = u"" + self.config.access_group = "" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() # Make sure the key does not appear in the outgoing message. @@ -368,8 +399,7 @@ self.assertEqual(messages[0]["type"], "register") def test_no_message_when_should_register_is_false(self): - """If we already have a secure id, do not queue a register message. 
- """ + """If we already have a secure id, do not queue a register message.""" self.mstore.set_accepted_types(["register"]) self.config.computer_title = "Computer Title" self.config.account_name = "account_name" @@ -395,9 +425,12 @@ """ reactor_fire_mock = self.reactor.fire = mock.Mock() self.exchanger.handle_message( - {"type": b"registration", "info": b"unknown-account"}) + {"type": b"registration", "info": b"unknown-account"}, + ) reactor_fire_mock.assert_called_with( - "registration-failed", reason="unknown-account") + "registration-failed", + reason="unknown-account", + ) def test_registration_failed_event_max_pending_computers(self): """ @@ -407,9 +440,12 @@ """ reactor_fire_mock = self.reactor.fire = mock.Mock() self.exchanger.handle_message( - {"type": b"registration", "info": b"max-pending-computers"}) + {"type": b"registration", "info": b"max-pending-computers"}, + ) reactor_fire_mock.assert_called_with( - "registration-failed", reason="max-pending-computers") + "registration-failed", + reason="max-pending-computers", + ) def test_registration_failed_event_not_fired_when_uncertain(self): """ @@ -418,7 +454,8 @@ """ reactor_fire_mock = self.reactor.fire = mock.Mock() self.exchanger.handle_message( - {"type": b"registration", "info": b"blah-blah"}) + {"type": b"registration", "info": b"blah-blah"}, + ) for name, args, kwargs in reactor_fire_mock.mock_calls: self.assertNotEquals("registration-failed", args[0]) @@ -449,13 +486,15 @@ # This should somehow callback the deferred. self.exchanger.handle_message( - {"type": b"set-id", "id": b"abc", "insecure-id": b"def"}) + {"type": b"set-id", "id": b"abc", "insecure-id": b"def"}, + ) self.assertEqual(calls, [1]) # Doing it again to ensure that the deferred isn't called twice. self.exchanger.handle_message( - {"type": b"set-id", "id": b"abc", "insecure-id": b"def"}) + {"type": b"set-id", "id": b"abc", "insecure-id": b"def"}, + ) self.assertEqual(calls, [1]) @@ -477,7 +516,8 @@ # This should somehow callback the deferred. self.exchanger.handle_message( - {"type": b"set-id", "id": b"abc", "insecure-id": b"def"}) + {"type": b"set-id", "id": b"abc", "insecure-id": b"def"}, + ) self.assertEqual(results, [None]) @@ -502,13 +542,15 @@ # This should somehow callback the deferred. self.exchanger.handle_message( - {"type": b"registration", "info": b"unknown-account"}) + {"type": b"registration", "info": b"unknown-account"}, + ) self.assertEqual(calls, [True]) # Doing it again to ensure that the deferred isn't called twice. self.exchanger.handle_message( - {"type": b"registration", "info": b"unknown-account"}) + {"type": b"registration", "info": b"unknown-account"}, + ) self.assertEqual(calls, [True]) @@ -534,13 +576,15 @@ d.addErrback(add_call) self.exchanger.handle_message( - {"type": b"registration", "info": b"max-pending-computers"}) + {"type": b"registration", "info": b"max-pending-computers"}, + ) self.assertEqual(calls, [True]) # Doing it again to ensure that the deferred isn't called twice. 
self.exchanger.handle_message( - {"type": b"registration", "info": b"max-pending-computers"}) + {"type": b"registration", "info": b"max-pending-computers"}, + ) self.assertEqual(calls, [True]) @@ -575,9 +619,13 @@ class JujuRegistrationHandlerTest(RegistrationHandlerTestBase): - juju_contents = json.dumps({"environment-uuid": "DEAD-BEEF", - "machine-id": "1", - "api-addresses": "10.0.3.1:17070"}) + juju_contents = json.dumps( + { + "environment-uuid": "DEAD-BEEF", + "machine-id": "1", + "api-addresses": "10.0.3.1:17070", + }, + ) def test_juju_info_added_when_present(self): """ @@ -593,10 +641,13 @@ messages = self.mstore.get_pending_messages() self.assertEqual( - {"environment-uuid": "DEAD-BEEF", - "machine-id": "1", - "api-addresses": ["10.0.3.1:17070"]}, - messages[0]["juju-info"]) + { + "environment-uuid": "DEAD-BEEF", + "machine-id": "1", + "api-addresses": ["10.0.3.1:17070"], + }, + messages[0]["juju-info"], + ) def test_juju_info_skipped_with_old_server(self): """ diff -Nru landscape-client-23.02/landscape/client/broker/tests/test_server.py landscape-client-23.08/landscape/client/broker/tests/test_server.py --- landscape-client-23.02/landscape/client/broker/tests/test_server.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/tests/test_server.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,23 +1,23 @@ import random +from unittest.mock import Mock from configobj import ConfigObj -from mock import Mock -from twisted.internet.defer import succeed, fail +from twisted.internet.defer import fail +from twisted.internet.defer import succeed -from landscape.client.manager.manager import FAILED -from landscape.client.tests.helpers import ( - LandscapeTest, DEFAULT_ACCEPTED_TYPES) -from landscape.client.broker.tests.helpers import ( - BrokerServerHelper, RemoteClientHelper) +from landscape.client.broker.tests.helpers import BrokerServerHelper +from landscape.client.broker.tests.helpers import RemoteClientHelper from landscape.client.broker.tests.test_ping import FakePageGetter +from landscape.client.manager.manager import FAILED +from landscape.client.tests.helpers import DEFAULT_ACCEPTED_TYPES +from landscape.client.tests.helpers import LandscapeTest -class FakeClient(object): +class FakeClient: pass -class FakeCreator(object): - +class FakeCreator: def __init__(self, reactor, config): pass @@ -105,7 +105,11 @@ message = {"type": "test"} self.mstore.set_accepted_types(["test"]) self.assertRaises( - RuntimeError, self.broker.send_message, message, None) + RuntimeError, + self.broker.send_message, + message, + None, + ) def test_send_message_with_old_release_upgrader(self): """ @@ -123,8 +127,11 @@ If we receive a message from an old package-changer process that doesn't know about session IDs, we just let the message in. 
""" - message = {"type": "change-packages-result", "operation-id": 99, - "result-code": 123} + message = { + "type": "change-packages-result", + "operation-id": 99, + "result-code": 123, + } self.mstore.set_accepted_types(["change-packages-result"]) self.broker.send_message(message, True) self.assertMessages(self.mstore.get_pending_messages(), [message]) @@ -138,14 +145,17 @@ legacy_message = { b"type": b"change-packages-result", b"operation-id": 99, - b"result-code": 123} + b"result-code": 123, + } self.mstore.set_accepted_types(["change-packages-result"]) self.broker.send_message(legacy_message, True) - expected = [{ - "type": "change-packages-result", - "operation-id": 99, - "result-code": 123 - }] + expected = [ + { + "type": "change-packages-result", + "operation-id": 99, + "result-code": 123, + }, + ] self.assertMessages(self.mstore.get_pending_messages(), expected) self.assertTrue(self.exchanger.is_urgent()) @@ -176,9 +186,11 @@ self.assertEqual(len(self.broker.get_clients()), 1) self.assertEqual(len(self.broker.get_connectors()), 1) self.assertTrue( - isinstance(self.broker.get_client("test"), FakeClient)) + isinstance(self.broker.get_client("test"), FakeClient), + ) self.assertTrue( - isinstance(self.broker.get_connector("test"), FakeCreator)) + isinstance(self.broker.get_connector("test"), FakeCreator), + ) self.broker.connectors_registry = {"test": FakeCreator} result = self.broker.register_client("test") @@ -190,8 +202,10 @@ of each registered client, and returns a deferred resulting in C{None} if all C{exit} calls were successful. """ - self.broker.connectors_registry = {"foo": FakeCreator, - "bar": FakeCreator} + self.broker.connectors_registry = { + "foo": FakeCreator, + "bar": FakeCreator, + } self.broker.register_client("foo") self.broker.register_client("bar") for client in self.broker.get_clients(): @@ -203,8 +217,10 @@ The L{BrokerServer.stop_clients} method calls the C{exit} method of each registered client, and raises an exception if any calls fail. """ - self.broker.connectors_registry = {"foo": FakeCreator, - "bar": FakeCreator} + self.broker.connectors_registry = { + "foo": FakeCreator, + "bar": FakeCreator, + } self.broker.register_client("foo") self.broker.register_client("bar") [client1, client2] = self.broker.get_clients() @@ -221,8 +237,12 @@ config_obj["client"]["computer_title"] = "New Title" config_obj.write() result = self.broker.reload_configuration() - result.addCallback(lambda x: self.assertEqual( - self.config.computer_title, "New Title")) + result.addCallback( + lambda x: self.assertEqual( + self.config.computer_title, + "New Title", + ), + ) return result def test_reload_configuration_stops_clients(self): @@ -230,8 +250,10 @@ The L{BrokerServer.reload_configuration} method forces the config file associated with the broker server to be reloaded. """ - self.broker.connectors_registry = {"foo": FakeCreator, - "bar": FakeCreator} + self.broker.connectors_registry = { + "foo": FakeCreator, + "bar": FakeCreator, + } self.broker.register_client("foo") self.broker.register_client("bar") for client in self.broker.get_clients(): @@ -245,8 +267,9 @@ """ registered = self.broker.register() # This should callback the deferred. - self.exchanger.handle_message({"type": "set-id", "id": "abc", - "insecure-id": "def"}) + self.exchanger.handle_message( + {"type": "set-id", "id": "abc", "insecure-id": "def"}, + ) return self.assertSuccess(registered) def test_get_accepted_types_empty(self): @@ -263,8 +286,10 @@ message types accepted by the Landscape server. 
""" self.mstore.set_accepted_types(["foo", "bar"]) - self.assertEqual(sorted(self.broker.get_accepted_message_types()), - ["bar", "foo"]) + self.assertEqual( + sorted(self.broker.get_accepted_message_types()), + ["bar", "foo"], + ) def test_get_server_uuid_with_unset_uuid(self): """ @@ -288,8 +313,10 @@ """ self.broker.register_client_accepted_message_type("type1") self.broker.register_client_accepted_message_type("type2") - self.assertEqual(self.exchanger.get_client_accepted_message_types(), - sorted(["type1", "type2"] + DEFAULT_ACCEPTED_TYPES)) + self.assertEqual( + self.exchanger.get_client_accepted_message_types(), + sorted(["type1", "type2"] + DEFAULT_ACCEPTED_TYPES), + ) def test_fire_event(self): """ @@ -304,8 +331,10 @@ """ The L{BrokerServer.exit} method stops all registered clients. """ - self.broker.connectors_registry = {"foo": FakeCreator, - "bar": FakeCreator} + self.broker.connectors_registry = { + "foo": FakeCreator, + "bar": FakeCreator, + } self.broker.register_client("foo") self.broker.register_client("bar") for client in self.broker.get_clients(): @@ -430,8 +459,10 @@ """ callback = Mock(return_value="foo") self.client_reactor.call_on("resynchronize", callback) - return self.assertSuccess(self.broker.resynchronize(["foo"]), - [["foo"]]) + return self.assertSuccess( + self.broker.resynchronize(["foo"]), + [["foo"]], + ) def test_impending_exchange(self): """ @@ -448,7 +479,9 @@ plugin.exchange.assert_called_once_with() deferred = self.assertSuccess( - self.broker.impending_exchange(), [[None]]) + self.broker.impending_exchange(), + [[None]], + ) deferred.addCallback(assert_called) return deferred @@ -464,12 +497,15 @@ self.remote.register_client = Mock() def assert_called_made(ignored): - self.remote.register_client_accepted_message_type\ - .assert_called_once_with("type") + self.remote.register_client_accepted_message_type.assert_called_once_with( # noqa: E501 + "type", + ) self.remote.register_client.assert_called_once_with("client") deferred = self.assertSuccess( - self.broker.broker_reconnect(), [[None]]) + self.broker.broker_reconnect(), + [[None]], + ) return deferred.addCallback(assert_called_made) registered = self.client.register_message("type", lambda x: None) @@ -488,7 +524,9 @@ self.client_reactor.call_on("server-uuid-changed", callback) deferred = self.assertSuccess( - self.broker.server_uuid_changed(None, "abc"), [[return_value]]) + self.broker.server_uuid_changed(None, "abc"), + [[return_value]], + ) return deferred.addCallback(assert_called) def test_message_type_acceptance_changed(self): @@ -499,7 +537,9 @@ return_value = random.randint(1, 100) callback = Mock(return_value=return_value) self.client_reactor.call_on( - ("message-type-acceptance-changed", "type"), callback) + ("message-type-acceptance-changed", "type"), + callback, + ) result = self.broker.message_type_acceptance_changed("type", True) return self.assertSuccess(result, [[return_value]]) @@ -512,7 +552,9 @@ callback = Mock(return_value=return_value) self.client_reactor.call_on("package-data-changed", callback) return self.assertSuccess( - self.broker.package_data_changed(), [[return_value]]) + self.broker.package_data_changed(), + [[return_value]], + ) class HandlersTest(LandscapeTest): @@ -520,7 +562,7 @@ helpers = [BrokerServerHelper] def setUp(self): - super(HandlersTest, self).setUp() + super().setUp() self.broker.connectors_registry = {"test": FakeCreator} self.broker.register_client("test") self.client = self.broker.get_client("test") @@ -549,18 +591,25 @@ result = 
self.reactor.fire("message", message) result = [i for i in result if i is not None][0] - class StartsWith(object): - + class StartsWith: def __eq__(self, other): return other.startswith( - "Landscape client failed to handle this request (foobar)") + "Landscape client failed to handle this request (foobar)", + ) def broadcasted(ignored): self.client.message.assert_called_once_with(message) self.assertMessages( self.mstore.get_pending_messages(), - [{"type": "operation-result", "status": FAILED, - "result-text": StartsWith(), "operation-id": 4}]) + [ + { + "type": "operation-result", + "status": FAILED, + "result-text": StartsWith(), + "operation-id": 4, + }, + ], + ) result.addCallback(broadcasted) return result @@ -582,7 +631,10 @@ self.client.fire_event = Mock(return_value=succeed(None)) self.reactor.fire("message-type-acceptance-changed", "test", True) self.client.fire_event.assert_called_once_with( - "message-type-acceptance-changed", "test", True) + "message-type-acceptance-changed", + "test", + True, + ) def test_server_uuid_changed(self): """ @@ -592,7 +644,10 @@ self.client.fire_event = Mock(return_value=succeed(None)) self.reactor.fire("server-uuid-changed", None, 123) self.client.fire_event.assert_called_once_with( - "server-uuid-changed", None, 123) + "server-uuid-changed", + None, + 123, + ) def test_package_data_changed(self): """ diff -Nru landscape-client-23.02/landscape/client/broker/tests/test_service.py landscape-client-23.08/landscape/client/broker/tests/test_service.py --- landscape-client-23.02/landscape/client/broker/tests/test_service.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/tests/test_service.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,11 @@ import os +from unittest.mock import Mock -from mock import Mock - -from landscape.client.tests.helpers import LandscapeTest -from landscape.client.broker.tests.helpers import BrokerConfigurationHelper +from landscape.client.broker.amp import RemoteBrokerConnector from landscape.client.broker.service import BrokerService +from landscape.client.broker.tests.helpers import BrokerConfigurationHelper from landscape.client.broker.transport import HTTPTransport -from landscape.client.broker.amp import RemoteBrokerConnector +from landscape.client.tests.helpers import LandscapeTest from landscape.lib.testing import FakeReactor @@ -15,7 +14,7 @@ helpers = [BrokerConfigurationHelper] def setUp(self): - super(BrokerServiceTest, self).setUp() + super().setUp() class FakeBrokerService(BrokerService): reactor_factory = FakeReactor @@ -28,7 +27,8 @@ """ self.assertEqual( self.service.persist.filename, - os.path.join(self.config.data_path, "broker.bpickle")) + os.path.join(self.config.data_path, "broker.bpickle"), + ) def test_transport(self): """ diff -Nru landscape-client-23.02/landscape/client/broker/tests/test_store.py landscape-client-23.08/landscape/client/broker/tests/test_store.py --- landscape-client-23.02/landscape/client/broker/tests/test_store.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/tests/test_store.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,22 +1,22 @@ import os -import mock - +from unittest import mock from twisted.python.compat import intToBytes +from landscape.client.broker.store import MessageStore +from landscape.client.tests.helpers import LandscapeTest from landscape.lib.bpickle import dumps from landscape.lib.persist import Persist -from landscape.lib.schema import InvalidError, Int, Bytes, Unicode +from 
landscape.lib.schema import Bytes +from landscape.lib.schema import Int +from landscape.lib.schema import InvalidError +from landscape.lib.schema import Unicode from landscape.message_schemas.message import Message -from landscape.client.broker.store import MessageStore - -from landscape.client.tests.helpers import LandscapeTest class MessageStoreTest(LandscapeTest): - def setUp(self): - super(MessageStoreTest, self).setUp() + super().setUp() self.temp_dir = self.makeDir() self.persist_filename = self.makeFile() self.store = self.create_store() @@ -137,13 +137,87 @@ self.assertEqual(self.store.get_pending_offset(), 0) self.assertEqual(self.store.get_pending_messages(), []) + def test_messages_over_limit(self): + """ + Create six messages, two per directory. Since there is a limit of + one directory, only the last two messages should be in the queue. + """ + + self.store._directory_size = 2 + self.store._max_dirs = 1 + for num in range(6): # 0,1 2,3 4,5 + message = {"type": "data", "data": f"{num}".encode()} + self.store.add(message) + messages = self.store.get_pending_messages(200) + self.assertMessages( + messages, + [{"type": "data", "data": b"4"}, {"type": "data", "data": b"5"}], + ) + + def test_messages_under_limit(self): + """ + Create six messages, all of which should be in the queue, since 3 + directories are active. + """ + + self.store._directory_size = 2 + self.store._max_dirs = 3 + messages_sent = [] + for num in range(6): # 0,1 2,3 4,5 + message = {"type": "data", "data": f"{num}".encode()} + messages_sent.append(message) + self.store.add(message) + messages = self.store.get_pending_messages(200) + self.assertMessages(messages, messages_sent) + + def test_messages_over_mb(self): + """ + Create three messages with the second one being very large. The + max size should get triggered, so all should be cleared except for the + last one. 
+ """ + + self.store._directory_size = 2 + self.store._max_size_mb = 0.01 + self.store.add({"type": "data", "data": b"a"}) + self.store.add({"type": "data", "data": b"b" * 15000}) + self.store.add({"type": "data", "data": b"c"}) + messages = self.store.get_pending_messages(200) + self.assertMessages( + messages, + [{"type": "data", "data": b"c"}], + ) + + @mock.patch("shutil.rmtree") + def test_exception_on_message_limit(self, rmtree_mock): + """ + If an exception occurs while deleting it shouldn't affect the next + message sent + """ + rmtree_mock.side_effect = IOError("Error!") + self.store._directory_size = 1 + self.store._max_dirs = 1 + self.store.add({"type": "data", "data": b"a"}) + self.store.add({"type": "data", "data": b"b"}) + self.store.add({"type": "data", "data": b"c"}) + messages = self.store.get_pending_messages(200) + + self.assertMessages( + messages, + [ + {"type": "data", "data": b"a"}, + {"type": "data", "data": b"b"}, + {"type": "data", "data": b"c"}, + ], + ) + def test_one_message(self): self.store.add(dict(type="data", data=b"A thing")) messages = self.store.get_pending_messages(200) - self.assertMessages(messages, - [{"type": "data", - "data": b"A thing", - "api": b"3.2"}]) + self.assertMessages( + messages, + [{"type": "data", "data": b"A thing", "api": b"3.2"}], + ) def test_max_pending(self): for i in range(10): @@ -169,7 +243,7 @@ self.store.add(dict(type="data", data=intToBytes(i))) il = [m["data"] for m in self.store.get_pending_messages(60)] self.assertEqual(il, [intToBytes(i) for i in range(60)]) - self.assertEqual(set(os.listdir(self.temp_dir)), set(["0", "1", "2"])) + self.assertEqual(set(os.listdir(self.temp_dir)), {"0", "1", "2"}) self.store.set_pending_offset(60) self.store.delete_old_messages() @@ -177,23 +251,26 @@ def test_unaccepted(self): for i in range(10): - self.store.add(dict(type=["data", "unaccepted"][i % 2], - data=intToBytes(i))) + self.store.add( + dict(type=["data", "unaccepted"][i % 2], data=intToBytes(i)), + ) il = [m["data"] for m in self.store.get_pending_messages(20)] self.assertEqual(il, [intToBytes(i) for i in [0, 2, 4, 6, 8]]) def test_unaccepted_with_offset(self): for i in range(10): - self.store.add(dict(type=["data", "unaccepted"][i % 2], - data=intToBytes(i))) + self.store.add( + dict(type=["data", "unaccepted"][i % 2], data=intToBytes(i)), + ) self.store.set_pending_offset(2) il = [m["data"] for m in self.store.get_pending_messages(20)] self.assertEqual(il, [intToBytes(i) for i in [4, 6, 8]]) def test_unaccepted_reaccepted(self): for i in range(10): - self.store.add(dict(type=["data", "unaccepted"][i % 2], - data=intToBytes(i))) + self.store.add( + dict(type=["data", "unaccepted"][i % 2], data=intToBytes(i)), + ) self.store.set_pending_offset(2) il = [m["data"] for m in self.store.get_pending_messages(2)] self.store.set_accepted_types(["data", "unaccepted"]) @@ -202,8 +279,9 @@ def test_accepted_unaccepted(self): for i in range(10): - self.store.add(dict(type=["data", "unaccepted"][i % 2], - data=intToBytes(i))) + self.store.add( + dict(type=["data", "unaccepted"][i % 2], data=intToBytes(i)), + ) # Setting pending offset here means that the first two # messages, even though becoming unaccepted now, were already # accepted before, so they shouldn't be marked for hold. 
@@ -217,8 +295,9 @@ def test_accepted_unaccepted_old(self): for i in range(10): - self.store.add(dict(type=["data", "unaccepted"][i % 2], - data=intToBytes(i))) + self.store.add( + dict(type=["data", "unaccepted"][i % 2], data=intToBytes(i)), + ) self.store.set_pending_offset(2) self.store.set_accepted_types(["unaccepted"]) il = [m["data"] for m in self.store.get_pending_messages(20)] @@ -233,8 +312,10 @@ # messages will also be delivered. self.store.set_accepted_types(["data", "unaccepted"]) il = [m["data"] for m in self.store.get_pending_messages(20)] - self.assertEqual(il, [intToBytes(i) - for i in [1, 3, 5, 7, 9, 0, 2, 4, 6, 8]]) + self.assertEqual( + il, + [intToBytes(i) for i in [1, 3, 5, 7, 9, 0, 2, 4, 6, 8]], + ) def test_wb_handle_broken_messages(self): self.log_helper.ignore_errors(ValueError) @@ -281,8 +362,10 @@ messages = self.store.get_pending_messages() - self.assertEqual(messages, [{"type": "data", "data": b"2", - "api": b"3.2"}]) + self.assertEqual( + messages, + [{"type": "data", "data": b"2", "api": b"3.2"}], + ) self.store.set_pending_offset(len(messages)) @@ -307,11 +390,16 @@ # similar to unplugging the power -- i.e., we're not relying # on special exception-handling in the file-writing code. self.assertRaises( - IOError, self.store.add, {"type": "data", "data": 2}) + IOError, + self.store.add, + {"type": "data", "data": 2}, + ) mock_open.assert_called_with(mock.ANY, "wb") mock_open().write.assert_called_once_with(mock.ANY) - self.assertEqual(self.store.get_pending_messages(), - [{"type": "data", "data": 1, "api": b"3.2"}]) + self.assertEqual( + self.store.get_pending_messages(), + [{"type": "data", "data": 1, "api": b"3.2"}], + ) def test_get_server_api_default(self): """ @@ -331,8 +419,10 @@ By default messages are tagged with the 3.2 server API. """ self.store.add({"type": "empty"}) - self.assertEqual(self.store.get_pending_messages(), - [{"type": "empty", "api": b"3.2"}]) + self.assertEqual( + self.store.get_pending_messages(), + [{"type": "empty", "api": b"3.2"}], + ) def test_custom_api_on_store(self): """ @@ -341,14 +431,18 @@ """ self.store.set_server_api(b"3.3") self.store.add({"type": "empty"}) - self.assertEqual(self.store.get_pending_messages(), - [{"type": "empty", "api": b"3.3"}]) + self.assertEqual( + self.store.get_pending_messages(), + [{"type": "empty", "api": b"3.3"}], + ) def test_custom_api_on_messages(self): self.store.set_server_api(b"3.3") self.store.add({"type": "empty", "api": b"3.2"}) - self.assertEqual(self.store.get_pending_messages(), - [{"type": "empty", "api": b"3.2"}]) + self.assertEqual( + self.store.get_pending_messages(), + [{"type": "empty", "api": b"3.2"}], + ) def test_coercion(self): """ @@ -356,8 +450,11 @@ coerced according to the message schema for the type of the message. """ - self.assertRaises(InvalidError, - self.store.add, {"type": "data", "data": 3}) + self.assertRaises( + InvalidError, + self.store.add, + {"type": "data", "data": 3}, + ) def test_coercion_ignores_custom_api(self): """ @@ -372,12 +469,17 @@ the coercion. 
""" self.store.add_schema(Message("data", {"data": Unicode()})) - self.store.add({"type": "data", - "data": u"\N{HIRAGANA LETTER A}".encode("utf-8"), - "api": b"3.2"}) - self.assertEqual(self.store.get_pending_messages(), - [{"type": "data", "api": b"3.2", - "data": u"\N{HIRAGANA LETTER A}"}]) + self.store.add( + { + "type": "data", + "data": "\N{HIRAGANA LETTER A}".encode(), + "api": b"3.2", + }, + ) + self.assertEqual( + self.store.get_pending_messages(), + [{"type": "data", "api": b"3.2", "data": "\N{HIRAGANA LETTER A}"}], + ) def test_message_is_coerced_to_its_api_schema(self): """ @@ -392,7 +494,8 @@ self.store.add({"type": "data", "data": 123}) self.assertEqual( self.store.get_pending_messages(), - [{"type": "data", "api": b"3.3", "data": 123}]) + [{"type": "data", "api": b"3.3", "data": 123}], + ) def test_message_is_coerced_to_highest_compatible_api_schema(self): """ @@ -408,7 +511,8 @@ self.store.add({"type": "data", "data": b"foo"}) self.assertEqual( self.store.get_pending_messages(), - [{"type": "data", "api": b"3.2", "data": b"foo"}]) + [{"type": "data", "api": b"3.2", "data": b"foo"}], + ) def test_count_pending_messages(self): """It is possible to get the total number of pending messages.""" @@ -432,8 +536,7 @@ self.assertTrue(os.path.exists(filename)) store = MessageStore(Persist(filename=filename), self.temp_dir) - self.assertEqual(set(store.get_accepted_types()), - set(["foo", "bar"])) + self.assertEqual(set(store.get_accepted_types()), {"foo", "bar"}) def test_is_pending_pre_and_post_message_delivery(self): self.log_helper.ignore_errors(ValueError) @@ -507,8 +610,7 @@ self.assertEqual(global_session_id1, global_session_id2) def test_get_session_id_unique_for_each_scope(self): - """We get a unique session id for differing scopes. - """ + """We get a unique session id for differing scopes.""" session_id1 = self.store.get_session_id() session_id2 = self.store.get_session_id(scope="other") self.assertNotEqual(session_id1, session_id2) @@ -518,14 +620,14 @@ default. """ session_id = self.store.get_session_id() - persisted_ids = self.store._persist.get('session-ids') + persisted_ids = self.store._persist.get("session-ids") scope = persisted_ids[session_id] self.assertIs(None, scope) def test_get_session_id_with_scope(self): """Test that we can generate a session id within a limited scope.""" session_id = self.store.get_session_id(scope="hwinfo") - persisted_ids = self.store._persist.get('session-ids') + persisted_ids = self.store._persist.get("session-ids") scope = persisted_ids[session_id] self.assertEqual("hwinfo", scope) @@ -592,8 +694,7 @@ def test_record_failure_sets_first_failure_time(self): """first-failure-time recorded when calling record_failure().""" self.store.record_failure(123) - self.assertEqual( - 123, self.store._persist.get("first-failure-time")) + self.assertEqual(123, self.store._persist.get("first-failure-time")) def test_messages_rejected_if_failure_older_than_one_week(self): """Messages stop accumulating after one week of not being sent.""" @@ -602,10 +703,12 @@ self.assertIsNot(None, self.store.add({"type": "empty"})) self.store.record_failure((7 * 24 * 60 * 60) + 1) self.assertIs(None, self.store.add({"type": "empty"})) - self.assertIn("WARNING: Unable to succesfully communicate with " - "Landscape server for more than a week. Waiting for " - "resync.", - self.logfile.getvalue()) + self.assertIn( + "WARNING: Unable to succesfully communicate with " + "Landscape server for more than a week. 
Waiting for " + "resync.", + self.logfile.getvalue(), + ) # Resync message and the first one we added right on the week boundary self.assertEqual(2, len(self.store.get_pending_messages())) @@ -618,8 +721,10 @@ self.store.record_failure((7 * 24 * 60 * 60) + 1) self.store.add({"type": "empty"}) self.assertIs(None, self.store.add({"type": "empty"})) - self.assertIn("DEBUG: Dropped message, awaiting resync.", - self.logfile.getvalue()) + self.assertIn( + "DEBUG: Dropped message, awaiting resync.", + self.logfile.getvalue(), + ) def test_after_clearing_blackhole_messages_are_accepted_again(self): """After a successful exchange, messages are accepted again.""" @@ -644,13 +749,13 @@ filename = os.path.join(self.temp_dir, "0", "0") os.makedirs(os.path.dirname(filename)) with open(filename, "wb") as fh: - fh.write(dumps({b"type": b"data", - b"data": b"A thing", - b"api": b"3.2"})) + fh.write( + dumps({b"type": b"data", b"data": b"A thing", b"api": b"3.2"}), + ) [message] = self.store.get_pending_messages() # message keys are decoded - self.assertIn(u"type", message) - self.assertIn(u"api", message) - self.assertIsInstance(message[u"api"], bytes) # api is bytes - self.assertEqual(u"data", message[u"type"]) # message type is decoded - self.assertEqual(b"A thing", message[u"data"]) # other are kept as-is + self.assertIn("type", message) + self.assertIn("api", message) + self.assertIsInstance(message["api"], bytes) # api is bytes + self.assertEqual("data", message["type"]) # message type is decoded + self.assertEqual(b"A thing", message["data"]) # other are kept as-is diff -Nru landscape-client-23.02/landscape/client/broker/tests/test_transport.py landscape-client-23.08/landscape/client/broker/tests/test_transport.py --- landscape-client-23.02/landscape/client/broker/tests/test_transport.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/tests/test_transport.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,19 +1,18 @@ -# -*- coding: utf-8 -*- import os +from twisted.internet import reactor +from twisted.internet.ssl import DefaultOpenSSLContextFactory +from twisted.internet.threads import deferToThread +from twisted.web import resource +from twisted.web import server + from landscape import VERSION from landscape.client.broker.transport import HTTPTransport +from landscape.client.tests.helpers import LandscapeTest from landscape.lib import bpickle from landscape.lib.fetch import PyCurlError from landscape.lib.testing import LogKeeperHelper -from landscape.client.tests.helpers import LandscapeTest - -from twisted.web import server, resource -from twisted.internet import reactor -from twisted.internet.ssl import DefaultOpenSSLContextFactory -from twisted.internet.threads import deferToThread - def sibpath(path): return os.path.abspath(os.path.join(os.path.dirname(__file__), path)) @@ -29,7 +28,7 @@ request = content = None - def getChild(self, request, name): + def getChild(self, request, name): # noqa: N802 return self def render(self, request): @@ -43,23 +42,33 @@ helpers = [LogKeeperHelper] def setUp(self): - super(HTTPTransportTest, self).setUp() + super().setUp() self.ports = [] def tearDown(self): - super(HTTPTransportTest, self).tearDown() + super().tearDown() for port in self.ports: port.stopListening() def request_with_payload(self, payload): resource = DataCollectingResource() port = reactor.listenTCP( - 0, server.Site(resource), interface="127.0.0.1") + 0, + server.Site(resource), + interface="127.0.0.1", + ) self.ports.append(port) transport = HTTPTransport( 
- None, "http://localhost:%d/" % (port.getHost().port,)) - result = deferToThread(transport.exchange, payload, computer_id="34", - exchange_token="abcd-efgh", message_api="X.Y") + None, + f"http://localhost:{port.getHost().port:d}/", + ) + result = deferToThread( + transport.exchange, + payload, + computer_id="34", + exchange_token="abcd-efgh", + message_api="X.Y", + ) def got_result(ignored): try: @@ -70,12 +79,15 @@ def get_header(header): return [resource.request.received_headers[header]] - self.assertEqual(get_header(u"x-computer-id"), ["34"]) + self.assertEqual(get_header("x-computer-id"), ["34"]) self.assertEqual(get_header("x-exchange-token"), ["abcd-efgh"]) self.assertEqual( - get_header("user-agent"), ["landscape-client/%s" % (VERSION,)]) + get_header("user-agent"), + [f"landscape-client/{VERSION}"], + ) self.assertEqual(get_header("x-message-api"), ["X.Y"]) self.assertEqual(bpickle.loads(resource.content), payload) + result.addCallback(got_result) return result @@ -103,7 +115,7 @@ When a payload contains unicode characters they are properly handled by bpickle. """ - return self.request_with_payload(payload=u"проба") + return self.request_with_payload(payload="проба") def test_ssl_verification_positive(self): """ @@ -113,13 +125,24 @@ """ resource = DataCollectingResource() context_factory = DefaultOpenSSLContextFactory(PRIVKEY, PUBKEY) - port = reactor.listenSSL(0, server.Site(resource), context_factory, - interface="127.0.0.1") + port = reactor.listenSSL( + 0, + server.Site(resource), + context_factory, + interface="127.0.0.1", + ) self.ports.append(port) transport = HTTPTransport( - None, "https://localhost:%d/" % (port.getHost().port,), PUBKEY) - result = deferToThread(transport.exchange, "HI", computer_id="34", - message_api="X.Y") + None, + f"https://localhost:{port.getHost().port:d}/", + PUBKEY, + ) + result = deferToThread( + transport.exchange, + "HI", + computer_id="34", + message_api="X.Y", + ) def got_result(ignored): try: @@ -129,11 +152,15 @@ # without requestHeaders def get_header(header): return [resource.request.received_headers[header]] + self.assertEqual(get_header("x-computer-id"), ["34"]) self.assertEqual( - get_header("user-agent"), ["landscape-client/%s" % (VERSION,)]) + get_header("user-agent"), + [f"landscape-client/{VERSION}"], + ) self.assertEqual(get_header("x-message-api"), ["X.Y"]) self.assertEqual(bpickle.loads(resource.content), "HI") + result.addCallback(got_result) return result @@ -145,21 +172,34 @@ """ self.log_helper.ignore_errors(PyCurlError) r = DataCollectingResource() - context_factory = DefaultOpenSSLContextFactory( - BADPRIVKEY, BADPUBKEY) - port = reactor.listenSSL(0, server.Site(r), context_factory, - interface="127.0.0.1") + context_factory = DefaultOpenSSLContextFactory(BADPRIVKEY, BADPUBKEY) + port = reactor.listenSSL( + 0, + server.Site(r), + context_factory, + interface="127.0.0.1", + ) self.ports.append(port) - transport = HTTPTransport(None, "https://localhost:%d/" - % (port.getHost().port,), pubkey=PUBKEY) - - result = deferToThread(transport.exchange, "HI", computer_id="34", - message_api="X.Y") + transport = HTTPTransport( + None, + f"https://localhost:{port.getHost().port:d}/", + pubkey=PUBKEY, + ) + + result = deferToThread( + transport.exchange, + "HI", + computer_id="34", + message_api="X.Y", + ) def got_result(ignored): self.assertIs(r.request, None) self.assertIs(r.content, None) - self.assertTrue("server certificate verification failed" - in self.logfile.getvalue()) + self.assertTrue( + "server certificate verification 
failed" + in self.logfile.getvalue(), + ) + result.addErrback(got_result) return result diff -Nru landscape-client-23.02/landscape/client/broker/transport.py landscape-client-23.08/landscape/client/broker/transport.py --- landscape-client-23.02/landscape/client/broker/transport.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/broker/transport.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,20 +1,21 @@ """Low-level server communication.""" -import time import logging import pprint +import time import uuid import pycurl -from landscape.lib.compat import unicode, _PY3 - +from landscape import SERVER_API +from landscape import VERSION from landscape.lib import bpickle +from landscape.lib.compat import _PY3 +from landscape.lib.compat import unicode from landscape.lib.fetch import fetch from landscape.lib.format import format_delta -from landscape import SERVER_API, VERSION -class HTTPTransport(object): +class HTTPTransport: """Transport makes a request to exchange message data over HTTP. @param url: URL of the remote Landscape server message system. @@ -40,9 +41,11 @@ # assigning them to the headers. if _PY3 and isinstance(message_api, bytes): message_api = message_api.decode("ascii") - headers = {"X-Message-API": message_api, - "User-Agent": "landscape-client/%s" % VERSION, - "Content-Type": "application/octet-stream"} + headers = { + "X-Message-API": message_api, + "User-Agent": f"landscape-client/{VERSION}", + "Content-Type": "application/octet-stream", + } if computer_id: if _PY3 and isinstance(computer_id, bytes): computer_id = computer_id.decode("ascii") @@ -52,11 +55,25 @@ exchange_token = exchange_token.decode("ascii") headers["X-Exchange-Token"] = str(exchange_token) curl = pycurl.Curl() - return (curl, fetch(self._url, post=True, data=payload, - headers=headers, cainfo=self._pubkey, curl=curl)) - - def exchange(self, payload, computer_id=None, exchange_token=None, - message_api=SERVER_API): + return ( + curl, + fetch( + self._url, + post=True, + data=payload, + headers=headers, + cainfo=self._pubkey, + curl=curl, + ), + ) + + def exchange( + self, + payload, + computer_id=None, + exchange_token=None, + message_api=SERVER_API, + ): """Exchange message data with the server. @param payload: The object to send, it must be L{bpickle}-compatible. @@ -78,30 +95,39 @@ if logging.getLogger().getEffectiveLevel() <= logging.DEBUG: logging.debug("Sending payload:\n%s", pprint.pformat(payload)) try: - curly, data = self._curl(spayload, computer_id, exchange_token, - message_api) + curly, data = self._curl( + spayload, + computer_id, + exchange_token, + message_api, + ) except Exception: - logging.exception("Error contacting the server at %s." 
% self._url) + logging.exception(f"Error contacting the server at {self._url}.") raise else: - logging.info("Sent %d bytes and received %d bytes in %s.", - len(spayload), len(data), - format_delta(time.time() - start_time)) + logging.info( + "Sent %d bytes and received %d bytes in %s.", + len(spayload), + len(data), + format_delta(time.time() - start_time), + ) try: response = bpickle.loads(data) except Exception: - logging.exception("Server returned invalid data: %r" % data) + logging.exception(f"Server returned invalid data: {data!r}") return None else: if logging.getLogger().getEffectiveLevel() <= logging.DEBUG: logging.debug( - "Received payload:\n%s", pprint.pformat(response)) + "Received payload:\n%s", + pprint.pformat(response), + ) return response -class FakeTransport(object): +class FakeTransport: """Fake transport for testing purposes.""" def __init__(self, reactor=None, url=None, pubkey=None): @@ -123,8 +149,13 @@ def set_url(self, url): self._url = url - def exchange(self, payload, computer_id=None, exchange_token=None, - message_api=SERVER_API): + def exchange( + self, + payload, + computer_id=None, + exchange_token=None, + message_api=SERVER_API, + ): self.payloads.append(payload) self.computer_id = computer_id self.exchange_token = exchange_token @@ -140,8 +171,10 @@ if isinstance(response, Exception): raise response - result = {"next-expected-sequence": self.next_expected_sequence, - "next-exchange-token": unicode(uuid.uuid4()), - "messages": response} + result = { + "next-expected-sequence": self.next_expected_sequence, + "next-exchange-token": unicode(uuid.uuid4()), + "messages": response, + } result.update(self.extra) return result diff -Nru landscape-client-23.02/landscape/client/configuration.py landscape-client-23.08/landscape/client/configuration.py --- landscape-client-23.02/landscape/client/configuration.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/configuration.py 2023-08-17 21:19:32.000000000 +0000 @@ -3,35 +3,36 @@ This module, and specifically L{LandscapeSetupScript}, implements the support for the C{landscape-config} script. 
""" - -from __future__ import print_function - -from functools import partial import getpass import io import os import pwd +import shlex import sys import textwrap +from functools import partial +from urllib.parse import urlparse -from landscape.lib.compat import input +from landscape.client.broker.amp import RemoteBrokerConnector +from landscape.client.broker.config import BrokerConfiguration +from landscape.client.broker.registration import Identity +from landscape.client.broker.registration import RegistrationError +from landscape.client.broker.service import BrokerService +from landscape.client.reactor import LandscapeReactor +from landscape.client.serviceconfig import ServiceConfig +from landscape.client.serviceconfig import ServiceConfigException from landscape.lib import base64 - -from landscape.lib.tag import is_valid_tag - -from landscape.client.sysvconfig import SysVConfig, ProcessError from landscape.lib.amp import MethodCallError -from landscape.lib.twisted_util import gather_results -from landscape.lib.fetch import fetch, FetchError +from landscape.lib.bootstrap import BootstrapDirectory +from landscape.lib.bootstrap import BootstrapList +from landscape.lib.compat import input +from landscape.lib.fetch import fetch +from landscape.lib.fetch import FetchError from landscape.lib.fs import create_binary_file -from landscape.lib.bootstrap import BootstrapList, BootstrapDirectory +from landscape.lib.network import get_fqdn from landscape.lib.persist import Persist -from landscape.client.reactor import LandscapeReactor -from landscape.client.broker.registration import RegistrationError -from landscape.client.broker.config import BrokerConfiguration -from landscape.client.broker.amp import RemoteBrokerConnector -from landscape.client.broker.registration import Identity -from landscape.client.broker.service import BrokerService +from landscape.lib.tag import is_valid_tag +from landscape.lib.twisted_util import gather_results EXIT_NOT_REGISTERED = 5 @@ -46,8 +47,8 @@ def print_text(text, end="\n", error=False): - """Display the given text to the user, using stderr if flagged as an error. - """ + """Display the given text to the user, using stderr + if flagged as an error.""" if error: stream = sys.stderr else: @@ -66,7 +67,7 @@ """Prompt for a yes/no question and return the answer as bool.""" default_msg = "[Y/n]" if default else "[y/N]" while True: - value = input("{} {}: ".format(message, default_msg)).lower() + value = input(f"{message} {default_msg}: ").lower() if value: if value.startswith("n"): return False @@ -87,7 +88,8 @@ if "ALL" in user_list: if len(user_list) > 1: raise ConfigurationError( - "Extra users specified with ALL users") + "Extra users specified with ALL users", + ) user_list.remove("ALL") invalid_users = [] for user in user_list: @@ -100,8 +102,15 @@ class LandscapeSetupConfiguration(BrokerConfiguration): - unsaved_options = ("no_start", "disable", "silent", "ok_no_register", - "import_from") + unsaved_options = ( + "no_start", + "disable", + "silent", + "ok_no_register", + "import_from", + ) + + encoding = "utf-8" def _load_external_options(self): """Handle the --import parameter. @@ -123,18 +132,23 @@ content = self.fetch_import_url(self.import_from) parser = self._get_config_object( alternative_config=io.StringIO( - content.decode("utf-8"))) + content.decode("utf-8"), + ), + ) elif not os.path.isfile(self.import_from): - raise ImportOptionError("File %s doesn't exist." 
% - self.import_from) + raise ImportOptionError( + f"File {self.import_from} doesn't exist.", + ) else: try: parser = self._get_config_object( - alternative_config=self.import_from) + alternative_config=self.import_from, + ) except Exception: raise ImportOptionError( - "Couldn't read configuration from %s." % - self.import_from) + "Couldn't read configuration " + f"from {self.import_from}.", + ) except Exception as error: raise ImportOptionError(str(error)) @@ -143,15 +157,16 @@ if parser and self.config_section in parser: options = parser[self.config_section] if not options: - raise ImportOptionError("Nothing to import at %s." % - self.import_from) + raise ImportOptionError( + f"Nothing to import at {self.import_from}.", + ) options.update(self._command_line_options) self._command_line_options = options def fetch_import_url(self, url): """Handle fetching of URLs passed to --url.""" - print_text("Fetching configuration from %s..." % url) + print_text(f"Fetching configuration from {url}...") error_message = None try: content = fetch(url) @@ -159,52 +174,79 @@ error_message = str(error) if error_message is not None: raise ImportOptionError( - "Couldn't download configuration from %s: %s" % - (url, error_message)) + f"Couldn't download configuration from {url}: {error_message}", + ) return content def make_parser(self): """ Specialize the parser, adding configure-specific options. """ - parser = super(LandscapeSetupConfiguration, self).make_parser() + parser = super().make_parser() - parser.add_option("--import", dest="import_from", - metavar="FILENAME_OR_URL", - help="Filename or URL to import configuration from. " - "Imported options behave as if they were " - "passed in the command line, with precedence " - "being given to real command line options.") - parser.add_option("--script-users", metavar="USERS", - help="A comma-separated list of users to allow " - "scripts to run. To allow scripts to be run " - "by any user, enter: ALL") - parser.add_option("--include-manager-plugins", metavar="PLUGINS", - default="", - help="A comma-separated list of manager plugins to " - "load.") - parser.add_option("-n", "--no-start", action="store_true", - help="Don't start the client automatically.") - parser.add_option("--ok-no-register", action="store_true", - help="Return exit code 0 instead of 2 if the client " - "can't be registered.") - parser.add_option("--silent", action="store_true", default=False, - help="Run without manual interaction.") - parser.add_option("--disable", action="store_true", default=False, - help="Stop running clients and disable start at " - "boot.") - parser.add_option("--init", action="store_true", default=False, - help="Set up the client directories structure " - "and exit.") - parser.add_option("--is-registered", action="store_true", - help="Exit with code 0 (success) if client is " - "registered else returns {}. Displays " - "registration info." - .format(EXIT_NOT_REGISTERED)) + parser.add_option( + "--import", + dest="import_from", + metavar="FILENAME_OR_URL", + help="Filename or URL to import configuration from. " + "Imported options behave as if they were " + "passed in the command line, with precedence " + "being given to real command line options.", + ) + parser.add_option( + "--script-users", + metavar="USERS", + help="A comma-separated list of users to allow " + "scripts to run. 
To allow scripts to be run " + "by any user, enter: ALL", + ) + parser.add_option( + "--include-manager-plugins", + metavar="PLUGINS", + default="", + help="A comma-separated list of manager plugins to " "load.", + ) + parser.add_option( + "-n", + "--no-start", + action="store_true", + help="Don't start the client automatically.", + ) + parser.add_option( + "--ok-no-register", + action="store_true", + help="Return exit code 0 instead of 2 if the client " + "can't be registered.", + ) + parser.add_option( + "--silent", + action="store_true", + default=False, + help="Run without manual interaction.", + ) + parser.add_option( + "--disable", + action="store_true", + default=False, + help="Stop running clients and disable start at " "boot.", + ) + parser.add_option( + "--init", + action="store_true", + default=False, + help="Set up the client directories structure " "and exit.", + ) + parser.add_option( + "--is-registered", + action="store_true", + help="Exit with code 0 (success) if client is " + "registered else returns {}. Displays " + "registration info.".format(EXIT_NOT_REGISTERED), + ) return parser -class LandscapeSetupScript(object): +class LandscapeSetupScript: """ An interactive procedure which manages the prompting and temporary storage of configuration parameters. @@ -218,6 +260,16 @@ def __init__(self, config): self.config = config + self.landscape_domain = None + + def get_domain(self): + domain = self.landscape_domain + if domain: + return domain + url = self.config.url + if url: + return urlparse(url).netloc + return "landscape.canonical.com" def prompt_get_input(self, msg, required): """Prompt the user on the terminal for a value @@ -226,14 +278,14 @@ @param required: True if value must be entered """ while True: - value = input(msg) + value = input(msg).strip() if value: return value elif not required: break show_help("This option is required to configure Landscape.") - def prompt(self, option, msg, required=False): + def prompt(self, option, msg, required=False, default=None): """Prompt the user on the terminal for a value. @param option: The attribute of C{self.config} that contains the @@ -241,16 +293,21 @@ @param msg: The message to prompt the user with (via C{input}). @param required: If True, the user will be required to enter a value before continuing. + @param default: Default only if set and no current value present """ - default = getattr(self.config, option, None) + cur_value = getattr(self.config, option, None) + if cur_value: + default = cur_value if default: - msg += " [%s]: " % default + msg += f" [{default}]: " else: msg += ": " required = required and not (bool(default)) result = self.prompt_get_input(msg, required) if result: setattr(self.config, option, result) + elif default: + setattr(self.config, option, default) def password_prompt(self, option, msg, required=False): """Prompt the user on the terminal for a password and mask the value. @@ -287,25 +344,31 @@ show_help( """ The computer title you provide will be used to represent this - computer in the Landscape user interface. It's important to use - a title that will allow the system to be easily recognized when - it appears on the pending computers page. - """) - - self.prompt("computer_title", "This computer's title", True) + computer in the Landscape dashboard. 
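The `get_domain` helper introduced above prefers an explicitly entered domain, then falls back to the host part of the configured URL, and finally to the SaaS default. A minimal standalone sketch of that fallback order, with hypothetical values and a hypothetical helper name, not taken from the patch:

    from urllib.parse import urlparse

    def resolve_domain(landscape_domain=None, url=None):
        # Mirrors LandscapeSetupScript.get_domain: explicit domain first,
        # then the netloc of the configured message-system URL, then SaaS.
        if landscape_domain:
            return landscape_domain
        if url:
            return urlparse(url).netloc
        return "landscape.canonical.com"

    # resolve_domain(url="https://landscape.example.com/message-system")
    # returns "landscape.example.com"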
+ """, + ) + self.prompt( + "computer_title", + "This computer's title", + False, + default=get_fqdn(), + ) def query_account_name(self): if "account_name" in self.config.get_command_line_options(): return - show_help( - """ - You must now specify the name of the Landscape account you - want to register this computer with. Your account name is shown - under 'Account name' at https://landscape.canonical.com . - """) - - self.prompt("account_name", "Account name", True) + if not self.landscape_domain: + show_help( + """ + You must now specify the name of the Landscape account you + want to register this computer with. Your account name is shown + under 'Account name' at https://landscape.canonical.com . + """, + ) + self.prompt("account_name", "Account name", True) + else: + self.config.account_name = "standalone" def query_registration_key(self): command_line_options = self.config.get_command_line_options() @@ -313,18 +376,15 @@ return show_help( - """ - A registration key may be associated with your Landscape - account to prevent unauthorized registration attempts. This - is not your personal login password. It is optional, and unless - explicitly set on the server, it may be skipped here. - - If you don't remember the registration key you can find it - at https://landscape.canonical.com/account/%s - """ % self.config.account_name) + f""" + A Registration Key prevents unauthorized registration attempts. + + Provide the Registration Key found at: + https://{self.get_domain()}/account/{self.config.account_name} + """, # noqa: E501 + ) - self.password_prompt("registration_key", - "Account registration key") + self.password_prompt("registration_key", "(Optional) Registration Key") def query_proxies(self): options = self.config.get_command_line_options() @@ -333,11 +393,10 @@ show_help( """ - The Landscape client communicates with the server over HTTP and - HTTPS. If your network requires you to use a proxy to access HTTP - and/or HTTPS web sites, please provide the address of these - proxies now. If you don't use a proxy, leave these fields empty. - """) + If your network requires you to use a proxy, provide the address of + these proxies now. + """, + ) if "http_proxy" not in options: self.prompt("http_proxy", "HTTP proxy URL") @@ -349,47 +408,12 @@ if "include_manager_plugins" in options and "script_users" in options: invalid_users = get_invalid_users(options["script_users"]) if invalid_users: - raise ConfigurationError("Unknown system users: %s" % - ", ".join(invalid_users)) + raise ConfigurationError( + "Unknown system users: {}".format( + ", ".join(invalid_users), + ), + ) return - show_help( - """ - Landscape has a feature which enables administrators to run - arbitrary scripts on machines under their control. By default this - feature is disabled in the client, disallowing any arbitrary script - execution. If enabled, the set of users that scripts may run as is - also configurable. - """) - msg = "Enable script execution?" - included_plugins = [ - p.strip() for p in self.config.include_manager_plugins.split(",")] - if included_plugins == [""]: - included_plugins = [] - default = "ScriptExecution" in included_plugins - if prompt_yes_no(msg, default=default): - if "ScriptExecution" not in included_plugins: - included_plugins.append("ScriptExecution") - show_help( - """ - By default, scripts are restricted to the 'landscape' and - 'nobody' users. Please enter a comma-delimited list of users - that scripts will be restricted to. To allow scripts to be run - by any user, enter "ALL". 
- """) - while True: - self.prompt("script_users", "Script users") - invalid_users = get_invalid_users( - self.config.script_users) - if not invalid_users: - break - else: - show_help("Unknown system users: {}".format( - ",".join(invalid_users))) - self.config.script_users = None - else: - if "ScriptExecution" in included_plugins: - included_plugins.remove("ScriptExecution") - self.config.include_manager_plugins = ", ".join(included_plugins) def query_access_group(self): """Query access group from the user.""" @@ -399,7 +423,8 @@ show_help( "You may provide an access group for this computer " - "e.g. webservers.") + "e.g. webservers.", + ) self.prompt("access_group", "Access group", False) def _get_invalid_tags(self, tagnames): @@ -419,53 +444,97 @@ if "tags" in options: invalid_tags = self._get_invalid_tags(options["tags"]) if invalid_tags: - raise ConfigurationError("Invalid tags: %s" % - ", ".join(invalid_tags)) + raise ConfigurationError( + "Invalid tags: {}".format(", ".join(invalid_tags)), + ) return + def query_landscape_edition(self): show_help( - "You may provide tags for this computer e.g. server,precise.") - while True: - self.prompt("tags", "Tags", False) - if self._get_invalid_tags(self.config.tags): - show_help( - "Tag names may only contain alphanumeric characters.") - self.config.tags = None # Reset for the next prompt - else: - break - - def show_header(self): - show_help( + "Manage this machine with Landscape " + "(https://ubuntu.com/landscape):\n", + ) + options = self.config.get_command_line_options() + if "ping_url" in options and "url" in options: + return + if prompt_yes_no( + "Will you be using your own Self-Hosted Landscape installation?", + default=False, + ): + show_help( + "Provide the fully qualified domain name " + "of your Landscape Server e.g. " + "landscape.yourdomain.com", + ) + self.landscape_domain = self.prompt_get_input( + "Landscape Domain: ", + True, + ) + self.config.ping_url = f"http://{self.landscape_domain}/ping" + self.config.url = f"https://{self.landscape_domain}/message-system" + else: + self.landscape_domain = "" + self.config.ping_url = self.config._command_line_defaults[ + "ping_url" + ] + self.config.url = self.config._command_line_defaults["url"] + if self.config.account_name == "standalone": + self.config.account_name = "" + + def show_summary(self): + + tx = f"""A summary of the provided information: + Computer's Title: {self.config.computer_title} + Account Name: {self.config.account_name} + Landscape FQDN: {self.get_domain()} + Registration Key: {True if self.config.registration_key else False} """ - This script will interactively set up the Landscape client. It will - ask you a few questions about this computer and your Landscape - account, and will submit that information to the Landscape server. - After this computer is registered it will need to be approved by an - account administrator on the pending computers page. - - Please see https://landscape.canonical.com for more information. 
- """) + show_help(tx) + if self.config.http_proxy or self.config.https_proxy: + show_help( + f"""HTTPS Proxy: {self.config.https_proxy} + HTTP Proxy: {self.config.http_proxy}""", + ) + + params = ( + "account_name", + "url", + "ping_url", + "registration_key", + "https_proxy", + "http_proxy", + ) + cmd = ["sudo", "landscape-config"] + for param in params: + value = self.config.get(param) + if value: + if param in self.config._command_line_defaults: + if value == self.config._command_line_defaults[param]: + continue + cmd.append("--" + param.replace("_", "-")) + if param == "registration_key": + cmd.append("HIDDEN") + else: + cmd.append(shlex.quote(value)) + show_help( + "The landscape-config parameters to repeat this registration" + " on another machine are:", + ) + show_help(" ".join(cmd)) def run(self): """Kick off the interactive process which prompts the user for data. Data will be saved to C{self.config}. """ - self.show_header() + self.query_landscape_edition() self.query_computer_title() self.query_account_name() self.query_registration_key() self.query_proxies() self.query_script_plugin() - self.query_access_group() self.query_tags() - - -def setup_init_script_and_start_client(): - "Configure the init script to start the client on boot." - # XXX This function is misnamed; it doesn't start the client. - sysvconfig = SysVConfig() - sysvconfig.set_start_on_boot(True) + self.show_summary() def stop_client_and_disable_init_script(): @@ -473,9 +542,8 @@ Stop landscape-client and change configuration to prevent starting landscape-client on boot. """ - sysvconfig = SysVConfig() - sysvconfig.stop_landscape() - sysvconfig.set_start_on_boot(False) + ServiceConfig.stop_landscape() + ServiceConfig.set_start_on_boot(False) def setup_http_proxy(config): @@ -496,8 +564,9 @@ """ if config.silent and not config.no_start: if not (config.get("account_name") and config.get("computer_title")): - raise ConfigurationError("An account name and computer title are " - "required.") + raise ConfigurationError( + "An account name and computer title are " "required.", + ) def check_script_users(config): @@ -508,8 +577,9 @@ if config.get("script_users"): invalid_users = get_invalid_users(config.get("script_users")) if invalid_users: - raise ConfigurationError("Unknown system users: %s" % - ", ".join(invalid_users)) + raise ConfigurationError( + "Unknown system users: {}".format(", ".join(invalid_users)), + ) if not config.include_manager_plugins: config.include_manager_plugins = "ScriptExecution" @@ -523,9 +593,9 @@ # certificate, but the actual certificate itself. 
if config.ssl_public_key and config.ssl_public_key.startswith("base64:"): decoded_cert = base64.decodebytes( - config.ssl_public_key[7:].encode("ascii")) - config.ssl_public_key = store_public_key_data( - config, decoded_cert) + config.ssl_public_key[7:].encode("ascii"), + ) + config.ssl_public_key = store_public_key_data(config, decoded_cert) def setup(config): @@ -538,19 +608,11 @@ """ bootstrap_tree(config) - sysvconfig = SysVConfig() if not config.no_start: if config.silent: - setup_init_script_and_start_client() - elif not sysvconfig.is_configured_to_run(): - answer = prompt_yes_no( - "\nThe Landscape client must be started " - "on boot to operate correctly.\n\n" - "Start Landscape client on boot?") - if answer: - setup_init_script_and_start_client() - else: - sys.exit("Aborting Landscape configuration") + ServiceConfig.set_start_on_boot(True) + elif not ServiceConfig.is_configured_to_run(): + ServiceConfig.set_start_on_boot(True) setup_http_proxy(config) check_account_name_and_password(config) @@ -564,11 +626,14 @@ # Restart the client to ensure that it's using the new configuration. if not config.no_start: try: - sysvconfig.restart_landscape() - except ProcessError: - print_text("Couldn't restart the Landscape client.", error=True) - print_text("This machine will be registered with the provided " - "details when the client runs.", error=True) + ServiceConfig.restart_landscape() + except ServiceConfigException as exc: + print_text(str(exc), error=True) + print_text( + "This machine will be registered with the provided " + "details when the client runs.", + error=True, + ) exit_code = 2 if config.ok_no_register: exit_code = 0 @@ -579,10 +644,17 @@ """Create the client directories tree.""" bootstrap_list = [ BootstrapDirectory("$data_path", "landscape", "root", 0o755), - BootstrapDirectory("$annotations_path", "landscape", "landscape", - 0o755)] + BootstrapDirectory( + "$annotations_path", + "landscape", + "landscape", + 0o755, + ), + ] BootstrapList(bootstrap_list).bootstrap( - data_path=config.data_path, annotations_path=config.annotations_path) + data_path=config.data_path, + annotations_path=config.annotations_path, + ) def store_public_key_data(config, certificate_data): @@ -598,8 +670,9 @@ """ key_filename = os.path.join( config.data_path, - os.path.basename(config.get_config_filename() + ".ssl_public_key")) - print_text("Writing SSL CA certificate to %s..." 
% key_filename)
+        os.path.basename(config.get_config_filename() + ".ssl_public_key"),
+    )
+    print_text(f"Writing SSL CA certificate to {key_filename}...")
     create_binary_file(key_filename, certificate_data)
     return key_filename
@@ -647,13 +720,18 @@
 def got_connection(add_result, connector, reactor, remote):
     """Handle becoming connected to a broker."""
-    handlers = {"registration-done": partial(success, add_result),
-                "registration-failed": partial(failure, add_result),
-                "exchange-failed": partial(exchange_failure, add_result)}
+    handlers = {
+        "registration-done": partial(success, add_result),
+        "registration-failed": partial(failure, add_result),
+        "exchange-failed": partial(exchange_failure, add_result),
+    }
     deferreds = [
         remote.call_on_event(handlers),
         remote.register().addErrback(
-            partial(handle_registration_errors, add_result), connector)]
+            partial(handle_registration_errors, add_result),
+            connector,
+        ),
+    ]
     results = gather_results(deferreds)
     results.addCallback(done, connector, reactor)
     return results
@@ -667,9 +745,15 @@
     reactor.stop()


-def register(config, reactor=None, connector_factory=RemoteBrokerConnector,
-             got_connection=got_connection, max_retries=14, on_error=None,
-             results=None):
+def register(
+    config,
+    reactor=None,
+    connector_factory=RemoteBrokerConnector,
+    got_connection=got_connection,
+    max_retries=14,
+    on_error=None,
+    results=None,
+):
     """Instruct the Landscape Broker to register the client.

     The broker will be instructed to reload its configuration and then to
@@ -701,9 +785,11 @@
     connector = connector_factory(reactor, config)
     connection = connector.connect(max_retries=max_retries, quiet=True)
     connection.addCallback(
-        partial(got_connection, add_result, connector, reactor))
+        partial(got_connection, add_result, connector, reactor),
+    )
     connection.addErrback(
-        partial(got_error, reactor=reactor, add_result=add_result))
+        partial(got_error, reactor=reactor, add_result=add_result),
+    )
     reactor.run()

     assert len(results) == 1, "We expect exactly one result."
@@ -721,27 +807,30 @@


 def report_registration_outcome(what_happened, print=print):
-    """Report the registration interaction outcome to the user in human-readable
-    form.
+    """Report the registration interaction outcome to the user in
+    human-readable form.
     """
     messages = {
-        "success": "System successfully registered.",
+        "success": "Registration request sent successfully.",
         "unknown-account": "Invalid account name or registration key.",
         "max-pending-computers": (
            "Maximum number of computers pending approval reached. "
            "Log in to your Landscape server account page to manage "
-            "pending computer approvals."),
+            "pending computer approvals."
+        ),
         "ssl-error": (
             "\nThe server's SSL information is incorrect, or fails "
             "signature verification!\n"
             "If the server is using a self-signed certificate, "
             "please ensure you supply it with the --ssl-public-key "
-            "parameter."),
+            "parameter."
+        ),
         "non-ssl-error": (
             "\nWe were unable to contact the server.\n"
             "Your internet connection may be down. "
             "The landscape client will continue to try and contact "
-            "the server periodically.")
+            "the server periodically."
+ ), } message = messages.get(what_happened) if message: @@ -762,28 +851,34 @@ def is_registered(config): """Return whether the client is already registered.""" persist_filename = os.path.join( - config.data_path, "{}.bpickle".format(BrokerService.service_name)) + config.data_path, + f"{BrokerService.service_name}.bpickle", + ) persist = Persist(filename=persist_filename) identity = Identity(config, persist) return bool(identity.secure_id) def registration_info_text(config, registration_status): - ''' + """ A simple output displaying whether the client is registered or not, the account name, and config and data paths - ''' + """ config_path = os.path.abspath(config._config_filename) - text = textwrap.dedent(""" + text = textwrap.dedent( + """ Registered: {} Config Path: {} - Data Path {}""" - .format(registration_status, config_path, - config.data_path)) + Data Path {}""".format( + registration_status, + config_path, + config.data_path, + ), + ) if registration_status: - text += '\nAccount Name: {}'.format(config.account_name) + text += f"\nAccount Name: {config.account_name}" return text @@ -829,8 +924,6 @@ print_text(str(e)) sys.exit("Aborting Landscape configuration") - print("Please wait...") - # Attempt to register the client. reactor = LandscapeReactor() if config.silent: @@ -841,7 +934,8 @@ default_answer = not is_registered(config) answer = prompt_yes_no( "\nRequest a new registration for this computer now?", - default=default_answer) + default=default_answer, + ) if answer: result = register(config, reactor) report_registration_outcome(result, print=print) diff -Nru landscape-client-23.02/landscape/client/deployment.py landscape-client-23.08/landscape/client/deployment.py --- landscape-client-23.02/landscape/client/deployment.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/deployment.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,32 +1,36 @@ import os.path import sys - from optparse import SUPPRESS_HELP + from twisted.logger import globalLogBeginner from landscape import VERSION +from landscape.client.upgraders import UPGRADE_MANAGERS from landscape.lib import logging from landscape.lib.config import BaseConfiguration as _BaseConfiguration from landscape.lib.persist import Persist -from landscape.client.upgraders import UPGRADE_MANAGERS - def init_logging(configuration, program_name): """Given a basic configuration, set up logging.""" - logging.init_app_logging(configuration.log_dir, configuration.log_level, - progname=program_name, - quiet=configuration.quiet) + logging.init_app_logging( + configuration.log_dir, + configuration.log_level, + progname=program_name, + quiet=configuration.quiet, + ) # Initialize twisted logging, even if we don't explicitly use it, # because of leaky logs https://twistedmatrix.com/trac/ticket/8164 globalLogBeginner.beginLoggingTo( - [lambda _: None], redirectStandardIO=False, discardBuffer=True) + [lambda _: None], + redirectStandardIO=False, + discardBuffer=True, + ) -def _is_script(filename=sys.argv[0], - _scriptdir=os.path.abspath("scripts")): +def _is_script(filename=sys.argv[0], _scriptdir=os.path.abspath("scripts")): filename = os.path.abspath(filename) - return (os.path.dirname(filename) == _scriptdir) + return os.path.dirname(filename) == _scriptdir class BaseConfiguration(_BaseConfiguration): @@ -35,8 +39,10 @@ default_config_filename = "/etc/landscape/client.conf" if _is_script(): - default_config_filenames = ("landscape-client.conf", - default_config_filename) + default_config_filenames = ( + 
"landscape-client.conf", + default_config_filename, + ) else: default_config_filenames = (default_config_filename,) default_data_dir = "/var/lib/landscape/client/" @@ -44,7 +50,7 @@ config_section = "client" def __init__(self): - super(BaseConfiguration, self).__init__() + super().__init__() self._command_line_defaults["config"] = None @@ -56,10 +62,10 @@ - config - data_path """ - return super(BaseConfiguration, self).make_parser( - cfgfile=self.default_config_filename, - datadir=self.default_data_dir, - ) + return super().make_parser( + cfgfile=self.default_config_filename, + datadir=self.default_data_dir, + ) class Configuration(BaseConfiguration): @@ -84,43 +90,82 @@ - C{ignore_sigint} (C{False}) - C{stagger_launch} (C{0.1}) """ - parser = super(Configuration, self).make_parser() + parser = super().make_parser() logging.add_cli_options(parser, logdir="/var/log/landscape") - parser.add_option("-u", "--url", default=self.DEFAULT_URL, - help="The server URL to connect to.") - parser.add_option("--ping-url", - help="The URL to perform lightweight exchange " - "initiation with.", - default="http://landscape.canonical.com/ping") - parser.add_option("-k", "--ssl-public-key", - help="The public SSL key to verify the server. " - "Only used if the given URL is https.") - parser.add_option("--ignore-sigint", action="store_true", - default=False, help="Ignore interrupt signals.") - parser.add_option("--ignore-sigusr1", action="store_true", - default=False, help="Ignore SIGUSR1 signal to " - "rotate logs.") - parser.add_option("--package-monitor-interval", default=30 * 60, - type="int", - help="The interval between package monitor runs " - "(default: 1800).") - parser.add_option("--apt-update-interval", default=6 * 60 * 60, - type="int", - help="The interval between apt update runs " - "(default: 21600).") - parser.add_option("--flush-interval", default=5 * 60, type="int", - metavar="INTERVAL", - help="The number of seconds between flushes to disk " - "for persistent data.") - parser.add_option("--stagger-launch", metavar="STAGGER_RATIO", - dest="stagger_launch", default=0.1, type=float, - help="Ratio, between 0 and 1, by which to scatter " - "various tasks of landscape.") + parser.add_option( + "-u", + "--url", + default=self.DEFAULT_URL, + help="The server URL to connect to.", + ) + parser.add_option( + "--ping-url", + help="The URL to perform lightweight exchange initiation with.", + default="http://landscape.canonical.com/ping", + ) + parser.add_option( + "-k", + "--ssl-public-key", + help="The public SSL key to verify the server. 
" + "Only used if the given URL is https.", + ) + parser.add_option( + "--ignore-sigint", + action="store_true", + default=False, + help="Ignore interrupt signals.", + ) + parser.add_option( + "--ignore-sigusr1", + action="store_true", + default=False, + help="Ignore SIGUSR1 signal to " "rotate logs.", + ) + parser.add_option( + "--package-monitor-interval", + default=30 * 60, + type="int", + help="The interval between package monitor runs " + "(default: 1800).", + ) + parser.add_option( + "--apt-update-interval", + default=6 * 60 * 60, + type="int", + help="The interval between apt update runs (default: 21600).", + ) + parser.add_option( + "--flush-interval", + default=5 * 60, + type="int", + metavar="INTERVAL", + help="The number of seconds between flushes to disk " + "for persistent data.", + ) + parser.add_option( + "--stagger-launch", + metavar="STAGGER_RATIO", + dest="stagger_launch", + default=0.1, + type=float, + help="Ratio, between 0 and 1, by which to stagger " + "various tasks of landscape.", + ) + parser.add_option( + "--snap-monitor-interval", + default=30 * 60, + type="int", + help="The interval between snap monitor runs (default 1800).", + ) # Hidden options, used for load-testing to run in-process clones parser.add_option("--clones", default=0, type=int, help=SUPPRESS_HELP) - parser.add_option("--start-clones-over", default=25 * 60, type=int, - help=SUPPRESS_HELP) + parser.add_option( + "--start-clones-over", + default=25 * 60, + type=int, + help=SUPPRESS_HELP, + ) return parser diff -Nru landscape-client-23.02/landscape/client/lockfile.py landscape-client-23.08/landscape/client/lockfile.py --- landscape-client-23.02/landscape/client/lockfile.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/lockfile.py 2023-08-17 21:19:32.000000000 +0000 @@ -39,14 +39,14 @@ # out normally. 
pass

-        result = super(PatchedFilesystemLock, self).lock()
+        result = super().lock()
         self.clean = self.clean and clean
         return result


 def get_process_name(pid):
     """Return a process name from a pid."""
-    stat_path = "/proc/{}/stat".format(pid)
+    stat_path = f"/proc/{pid}/stat"
     with open(stat_path) as stat_file:
         stat = stat_file.read()
     return stat.partition("(")[2].rpartition(")")[0]
diff -Nru landscape-client-23.02/landscape/client/manager/aptsources.py landscape-client-23.08/landscape/client/manager/aptsources.py
--- landscape-client-23.02/landscape/client/manager/aptsources.py	2023-02-07 18:55:50.000000000 +0000
+++ landscape-client-23.08/landscape/client/manager/aptsources.py	2023-08-17 21:19:32.000000000 +0000
@@ -1,17 +1,17 @@
 import glob
+import grp
 import os
 import pwd
-import grp
 import shutil
 import tempfile
 import uuid

 from twisted.internet.defer import succeed

-from landscape.lib.twisted_util import spawn_process
-
 from landscape.client.manager.plugin import ManagerPlugin
 from landscape.client.package.reporter import find_reporter_command
+from landscape.constants import FALSE_VALUES
+from landscape.lib.twisted_util import spawn_process


 class ProcessError(Exception):
@@ -26,9 +26,11 @@
     TRUSTED_GPG_D = "/etc/apt/trusted.gpg.d"

     def register(self, registry):
-        super(AptSources, self).register(registry)
-        registry.register_message("apt-sources-replace",
-                                  self._handle_repositories)
+        super().register(registry)
+        registry.register_message(
+            "apt-sources-replace",
+            self._handle_repositories,
+        )

     def _run_process(self, command, args, uid=None, gid=None):
         """
@@ -42,7 +44,7 @@
         """
         out, err, code = result
         if code:
-            raise ProcessError("%s\n%s" % (out, err))
+            raise ProcessError(f"{out}\n{err}")

     def _handle_process_failure(self, failure):
         """
@@ -50,7 +52,7 @@
         """
         if not failure.check(ProcessError):
             out, err, signal = failure.value.args
-            raise ProcessError("%s\n%s" % (out, err))
+            raise ProcessError(f"{out}\n{err}")
         else:
             return failure

@@ -85,9 +87,9 @@
                           "-----END PGP PUBLIC KEY BLOCK-----"]}
         """
         deferred = succeed(None)
-        prefix = 'landscape-server-'
+        prefix = "landscape-server-mirror"
         for key in message["gpg-keys"]:
-            filename = prefix + str(uuid.uuid4()) + '.asc'
+            filename = prefix + str(uuid.uuid4()) + ".asc"
             key_path = os.path.join(self.TRUSTED_GPG_D, filename)
             with open(key_path, "w") as key_file:
                 key_file.write(key)
@@ -96,8 +98,15 @@
         return self.call_with_operation_result(message, lambda: deferred)

     def _handle_sources(self, ignored, sources):
-        """Handle sources repositories."""
-        saved_sources = "{}.save".format(self.SOURCES_LIST)
+        """
+        Replaces `SOURCES_LIST` with a Landscape-managed version and moves the
+        original to a ".save" file.
+
+        Does the same with files in `SOURCES_LIST_D`, if configured to do so.
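The opt-out mentioned in this docstring is implemented a few lines below through the new `manage_sources_list_d` setting, compared against `landscape.constants.FALSE_VALUES`. A sketch of the gate, assuming FALSE_VALUES holds the usual falsy config spellings (the actual members live in landscape/constants.py):

    FALSE_VALUES = ["false", "no", "0", False, 0]  # assumed contents

    def should_manage_sources_list_d(config):
        # An absent option defaults to True, preserving the old behaviour
        # of moving every *.list file in SOURCES_LIST_D to a ".save" copy.
        value = getattr(config, "manage_sources_list_d", True)
        return value not in FALSE_VALUES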
+ """ + + saved_sources = f"{self.SOURCES_LIST}.save" + if sources: fd, path = tempfile.mkstemp() os.close(fd) @@ -106,29 +115,42 @@ new_sources.write( "# Landscape manages repositories for this computer\n" "# Original content of sources.list can be found in " - "sources.list.save\n") + "sources.list.save\n", + ) original_stat = os.stat(self.SOURCES_LIST) if not os.path.isfile(saved_sources): shutil.move(self.SOURCES_LIST, saved_sources) shutil.move(path, self.SOURCES_LIST) os.chmod(self.SOURCES_LIST, original_stat.st_mode) - os.chown(self.SOURCES_LIST, original_stat.st_uid, - original_stat.st_gid) + os.chown( + self.SOURCES_LIST, + original_stat.st_uid, + original_stat.st_gid, + ) else: # Re-instate original sources if os.path.isfile(saved_sources): shutil.move(saved_sources, self.SOURCES_LIST) - for filename in glob.glob(os.path.join(self.SOURCES_LIST_D, "*.list")): - shutil.move(filename, "%s.save" % filename) + manage_sources_list_d = getattr( + self.registry.config, + "manage_sources_list_d", + True, + ) + if manage_sources_list_d not in FALSE_VALUES: + filenames = glob.glob(os.path.join(self.SOURCES_LIST_D, "*.list")) + for filename in filenames: + shutil.move(filename, f"{filename}.save") for source in sources: - filename = os.path.join(self.SOURCES_LIST_D, - "landscape-%s.list" % source["name"]) + filename = os.path.join( + self.SOURCES_LIST_D, + f"landscape-{source['name']}.list", + ) # Servers send unicode, but an upgrade from python2 can get bytes # from stored messages, so we need to handle both. - is_unicode = isinstance(source["content"], type(u"")) + is_unicode = isinstance(source["content"], type("")) with open(filename, ("w" if is_unicode else "wb")) as sources_file: sources_file.write(source["content"]) os.chmod(filename, 0o644) @@ -142,7 +164,7 @@ args = ["--force-apt-update"] if self.registry.config.config is not None: - args.append("--config=%s" % self.registry.config.config) + args.append(f"--config={self.registry.config.config}") if os.getuid() == 0: uid = pwd.getpwnam("landscape").pw_uid diff -Nru landscape-client-23.02/landscape/client/manager/config.py landscape-client-23.08/landscape/client/manager/config.py --- landscape-client-23.02/landscape/client/manager/config.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/config.py 2023-08-17 21:19:32.000000000 +0000 @@ -4,9 +4,16 @@ from landscape.client.manager.scriptexecution import ALL_USERS -ALL_PLUGINS = ["ProcessKiller", "PackageManager", "UserManager", - "ShutdownManager", "AptSources", "HardwareInfo", - "KeystoneToken"] +ALL_PLUGINS = [ + "ProcessKiller", + "PackageManager", + "UserManager", + "ShutdownManager", + "AptSources", + "HardwareInfo", + "KeystoneToken", + "SnapManager", +] class ManagerConfiguration(Configuration): @@ -17,26 +24,38 @@ Specialize L{Configuration.make_parser}, adding many manager-specific options. """ - parser = super(ManagerConfiguration, self).make_parser() + parser = super().make_parser() - parser.add_option("--manager-plugins", metavar="PLUGIN_LIST", - help="Comma-delimited list of manager plugins to " - "use. ALL means use all plugins.", - default="ALL") - parser.add_option("--include-manager-plugins", metavar="PLUGIN_LIST", - help="Comma-delimited list of manager plugins to " - "enable, in addition to the defaults.") - parser.add_option("--script-users", metavar="USERS", - help="Comma-delimited list of usernames that scripts" - " may be run as. 
Default is to allow all " - "users.") - parser.add_option("--script-output-limit", - metavar="SCRIPT_OUTPUT_LIMIT", - type="int", default=512, - help="Maximum allowed output size that scripts" - " can send. " - "Script output will be truncated at that limit." - " Default is 512 (kB)") + parser.add_option( + "--manager-plugins", + metavar="PLUGIN_LIST", + help="Comma-delimited list of manager plugins to " + "use. ALL means use all plugins.", + default="ALL", + ) + parser.add_option( + "--include-manager-plugins", + metavar="PLUGIN_LIST", + help="Comma-delimited list of manager plugins to " + "enable, in addition to the defaults.", + ) + parser.add_option( + "--script-users", + metavar="USERS", + help="Comma-delimited list of usernames that scripts" + " may be run as. Default is to allow all " + "users.", + ) + parser.add_option( + "--script-output-limit", + metavar="SCRIPT_OUTPUT_LIMIT", + type="int", + default=512, + help="Maximum allowed output size that scripts" + " can send. " + "Script output will be truncated at that limit." + " Default is 512 (kB)", + ) return parser diff -Nru landscape-client-23.02/landscape/client/manager/customgraph.py landscape-client-23.08/landscape/client/manager/customgraph.py --- landscape-client-23.02/landscape/client/manager/customgraph.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/customgraph.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,19 +1,25 @@ +import logging import os import time -import logging -from twisted.internet.defer import fail, DeferredList, succeed +from twisted.internet.defer import DeferredList +from twisted.internet.defer import fail +from twisted.internet.defer import succeed from twisted.python.compat import iteritems -from landscape.lib.scriptcontent import generate_script_hash -from landscape.lib.user import get_user_info, UnknownUserError from landscape.client.accumulate import Accumulator from landscape.client.manager.plugin import ManagerPlugin +from landscape.client.manager.scriptexecution import ProcessFailedError from landscape.client.manager.scriptexecution import ( - ProcessFailedError, ScriptRunnerMixin, ProcessTimeLimitReachedError) + ProcessTimeLimitReachedError, +) +from landscape.client.manager.scriptexecution import ScriptRunnerMixin +from landscape.lib.scriptcontent import generate_script_hash +from landscape.lib.user import get_user_info +from landscape.lib.user import UnknownUserError -class StoreProxy(object): +class StoreProxy: """ Persist-like interface to store graph-points into SQLite store. """ @@ -33,19 +39,17 @@ class InvalidFormatError(Exception): - def __init__(self, value): self.value = value Exception.__init__(self, self._get_message()) def _get_message(self): - return u"Failed to convert to number: '%s'" % self.value + return f"Failed to convert to number: '{self.value}'" class NoOutputError(Exception): - def __init__(self): - Exception.__init__(self, u"Script did not output any value") + Exception.__init__(self, "Script did not output any value") class ProhibitedUserError(Exception): @@ -60,7 +64,7 @@ Exception.__init__(self, self._get_message()) def _get_message(self): - return (u"Custom graph cannot be run as user %s" % self.username) + return f"Custom graph cannot be run as user {self.username}" class CustomGraphPlugin(ManagerPlugin, ScriptRunnerMixin): @@ -71,23 +75,28 @@ @param process_factory: The L{IReactorProcess} provider to run the process with. 
""" + run_interval = 300 size_limit = 1000 time_limit = 10 message_type = "custom-graph" def __init__(self, process_factory=None, create_time=time.time): - super(CustomGraphPlugin, self).__init__(process_factory) + super().__init__(process_factory) self._create_time = create_time self._data = {} self.do_send = True def register(self, registry): - super(CustomGraphPlugin, self).register(registry) + super().register(registry) registry.register_message( - "custom-graph-add", self._handle_custom_graph_add) + "custom-graph-add", + self._handle_custom_graph_add, + ) registry.register_message( - "custom-graph-remove", self._handle_custom_graph_remove) + "custom-graph-remove", + self._handle_custom_graph_remove, + ) self._persist = StoreProxy(self.registry.store) self._accumulate = Accumulator(self._persist, self.run_interval) @@ -118,8 +127,7 @@ data_path = self.registry.config.data_path scripts_directory = os.path.join(data_path, "custom-graph-scripts") - filename = os.path.join( - scripts_directory, "graph-%d" % (graph_id,)) + filename = os.path.join(scripts_directory, f"graph-{graph_id:d}") if os.path.exists(filename): os.unlink(filename) @@ -127,23 +135,31 @@ try: uid, gid = get_user_info(user)[:2] except UnknownUserError: - logging.error(u"Attempt to add graph with unknown user %s" % - user) + logging.error(f"Attempt to add graph with unknown user {user}") else: script_file = open(filename, "wb") # file is closed in write_script_file self.write_script_file( - script_file, filename, shell, code, uid, gid) + script_file, + filename, + shell, + code, + uid, + gid, + ) if graph_id in self._data: del self._data[graph_id] self.registry.store.add_graph(graph_id, filename, user) def _format_exception(self, e): - return u"%s: %s" % (e.__class__.__name__, e.args[0]) + return "{}: {}".format(e.__class__.__name__, e.args[0]) def exchange(self, urgent=False): self.registry.broker.call_if_accepted( - self.message_type, self.send_message, urgent) + self.message_type, + self.send_message, + urgent, + ) def send_message(self, urgent): if not self.do_send: @@ -155,7 +171,10 @@ if os.path.isfile(filename): script_hash = self._get_script_hash(filename) self._data[graph_id] = { - "values": [], "error": u"", "script-hash": script_hash} + "values": [], + "error": "", + "script-hash": script_hash, + } message = {"type": self.message_type, "data": self._data} @@ -163,11 +182,17 @@ for graph_id, item in iteritems(self._data): script_hash = item["script-hash"] new_data[graph_id] = { - "values": [], "error": u"", "script-hash": script_hash} + "values": [], + "error": "", + "script-hash": script_hash, + } self._data = new_data - self.registry.broker.send_message(message, self._session_id, - urgent=urgent) + self.registry.broker.send_message( + message, + self._session_id, + urgent=urgent, + ) def _handle_data(self, output, graph_id, now): if graph_id not in self._data: @@ -190,15 +215,19 @@ if failure.check(ProcessFailedError): failure_value = failure.value.data if failure.value.exit_code: - failure_value = ("%s (process exited with code %d)" % - (failure_value, failure.value.exit_code)) + failure_value = ( + f"{failure_value} (process exited with code " + f"{failure.value.exit_code:d})" + ) self._data[graph_id]["error"] = failure_value elif failure.check(ProcessTimeLimitReachedError): - self._data[graph_id]["error"] = ( - u"Process exceeded the %d seconds limit" % (self.time_limit,)) + self._data[graph_id][ + "error" + ] = f"Process exceeded the {self.time_limit:d} seconds limit" else: self._data[graph_id]["error"] = 
self._format_exception( - failure.value) + failure.value, + ) def _get_script_hash(self, filename): with open(filename) as file_object: @@ -218,7 +247,10 @@ return succeed([]) return self.registry.broker.call_if_accepted( - self.message_type, self._continue_run, graphs) + self.message_type, + self._continue_run, + graphs, + ) def _continue_run(self, graphs): deferred_list = [] @@ -231,7 +263,10 @@ script_hash = b"" if graph_id not in self._data: self._data[graph_id] = { - "values": [], "error": u"", "script-hash": script_hash} + "values": [], + "error": "", + "script-hash": script_hash, + } else: self._data[graph_id]["script-hash"] = script_hash try: @@ -249,7 +284,13 @@ if not os.path.isfile(filename): continue result = self._run_script( - filename, uid, gid, path, {}, self.time_limit) + filename, + uid, + gid, + path, + {}, + self.time_limit, + ) result.addCallback(self._handle_data, graph_id, now) result.addErrback(self._handle_error, graph_id) deferred_list.append(result) diff -Nru landscape-client-23.02/landscape/client/manager/fakepackagemanager.py landscape-client-23.08/landscape/client/manager/fakepackagemanager.py --- landscape-client-23.02/landscape/client/manager/fakepackagemanager.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/fakepackagemanager.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,7 +1,7 @@ import random -from landscape.client.manager.plugin import ManagerPlugin from landscape.client.manager.manager import SUCCEEDED +from landscape.client.manager.plugin import ManagerPlugin class FakePackageManager(ManagerPlugin): @@ -10,41 +10,57 @@ randint = random.randint def register(self, registry): - super(FakePackageManager, self).register(registry) + super().register(registry) self.config = registry.config - registry.register_message("change-packages", - self.handle_change_packages) - registry.register_message("change-package-locks", - self.handle_change_package_locks) - registry.register_message("release-upgrade", - self.handle_release_upgrade) + registry.register_message( + "change-packages", + self.handle_change_packages, + ) + registry.register_message( + "change-package-locks", + self.handle_change_package_locks, + ) + registry.register_message( + "release-upgrade", + self.handle_release_upgrade, + ) def _handle(self, response): delay = self.randint(30, 300) self.registry.reactor.call_later( - delay, self.manager.broker.send_message, response, - self._session_id, urgent=True) + delay, + self.manager.broker.send_message, + response, + self._session_id, + urgent=True, + ) def handle_change_packages(self, message): - response = {"type": "change-packages-result", - "operation-id": message.get("operation-id"), - "result-code": 1, - "result-text": "OK done."} + response = { + "type": "change-packages-result", + "operation-id": message.get("operation-id"), + "result-code": 1, + "result-text": "OK done.", + } return self._handle(response) def handle_change_package_locks(self, message): - response = {"type": "operation-result", - "operation-id": message.get("operation-id"), - "status": SUCCEEDED, - "result-text": "Package locks successfully changed.", - "result-code": 0} + response = { + "type": "operation-result", + "operation-id": message.get("operation-id"), + "status": SUCCEEDED, + "result-text": "Package locks successfully changed.", + "result-code": 0, + } return self._handle(response) def handle_release_upgrade(self, message): - response = {"type": "operation-result", - "operation-id": message.get("operation-id"), - "status": 
SUCCEEDED,
-                    "result-text": "Successful release upgrade.",
-                    "result-code": 0}
+        response = {
+            "type": "operation-result",
+            "operation-id": message.get("operation-id"),
+            "status": SUCCEEDED,
+            "result-text": "Successful release upgrade.",
+            "result-code": 0,
+        }
         return self._handle(response)
diff -Nru landscape-client-23.02/landscape/client/manager/hardwareinfo.py landscape-client-23.08/landscape/client/manager/hardwareinfo.py
--- landscape-client-23.02/landscape/client/manager/hardwareinfo.py	2023-02-07 18:55:50.000000000 +0000
+++ landscape-client-23.08/landscape/client/manager/hardwareinfo.py	2023-08-17 21:19:32.000000000 +0000
@@ -2,8 +2,8 @@

 from twisted.internet.utils import getProcessOutput

-from landscape.lib.encoding import encode_values
 from landscape.client.manager.plugin import ManagerPlugin
+from landscape.lib.encoding import encode_values


 class HardwareInfo(ManagerPlugin):
@@ -15,17 +15,23 @@
     command = "/usr/bin/lshw"

     def register(self, registry):
-        super(HardwareInfo, self).register(registry)
+        super().register(registry)
         self.call_on_accepted(self.message_type, self.send_message)

     def run(self):
         return self.registry.broker.call_if_accepted(
-            self.message_type, self.send_message)
+            self.message_type,
+            self.send_message,
+        )

     def send_message(self):
         environ = encode_values(os.environ)
         result = getProcessOutput(
-            self.command, args=["-xml", "-quiet"], env=environ, path=None)
+            self.command,
+            args=["-xml", "-quiet"],
+            env=environ,
+            path=None,
+        )
         return result.addCallback(self._got_output)

     def _got_output(self, output):
diff -Nru landscape-client-23.02/landscape/client/manager/keystonetoken.py landscape-client-23.08/landscape/client/manager/keystonetoken.py
--- landscape-client-23.02/landscape/client/manager/keystonetoken.py	2023-02-07 18:55:50.000000000 +0000
+++ landscape-client-23.08/landscape/client/manager/keystonetoken.py	2023-08-17 21:19:32.000000000 +0000
@@ -1,12 +1,12 @@
-import os
 import logging
+import os

-from landscape.lib.compat import _PY3
-
-from landscape.lib.compat import ConfigParser, NoOptionError
 from landscape.client.monitor.plugin import DataWatcher
-from landscape.lib.persist import Persist
+from landscape.lib.compat import _PY3
+from landscape.lib.compat import ConfigParser
+from landscape.lib.compat import NoOptionError
 from landscape.lib.fs import read_binary_file
+from landscape.lib.persist import Persist


 KEYSTONE_CONFIG_FILE = "/etc/keystone/keystone.conf"
@@ -17,6 +17,7 @@
     A plugin which pulls the admin_token from the keystone configuration
     file and sends it to the landscape server.
     """
+
     message_type = "keystone-token"
     message_key = "data"
     run_interval = 60 * 15
@@ -26,12 +27,16 @@
         self._keystone_config_file = keystone_config_file

     def register(self, client):
-        super(KeystoneToken, self).register(client)
-        self._persist_filename = os.path.join(self.registry.config.data_path,
-                                              "keystone.bpickle")
+        super().register(client)
+        self._persist_filename = os.path.join(
+            self.registry.config.data_path,
+            "keystone.bpickle",
+        )
         self._persist = Persist(filename=self._persist_filename)
-        self.registry.reactor.call_every(self.registry.config.flush_interval,
-                                         self.flush)
+        self.registry.reactor.call_every(
+            self.registry.config.flush_interval,
+            self.flush,
+        )

     def _reset(self):
         """
@@ -54,16 +59,20 @@
            # We need to use the surrogateescape error handler as the
            # admin_token may contain arbitrary bytes. The ConfigParser in
            # Python 2 on the other hand does not support read_string.
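The change below decodes the keystone config with the surrogateescape handler so that arbitrary bytes in admin_token survive the round trip through str. A self-contained illustration, with hypothetical file contents:

    raw = b"[DEFAULT]\nadmin_token = \xffsecret\n"  # not valid UTF-8

    # Decoding never raises: the 0xff byte maps to a lone surrogate...
    text = raw.decode("utf-8", "surrogateescape")

    # ...and encoding with the same handler restores the original bytes.
    assert text.encode("utf-8", "surrogateescape") == raw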
- config_str = read_binary_file( - self._keystone_config_file).decode("utf-8", "surrogateescape") + config_str = read_binary_file(self._keystone_config_file).decode( + "utf-8", + "surrogateescape", + ) config.read_string(config_str) else: config.read(self._keystone_config_file) try: admin_token = config.get("DEFAULT", "admin_token") except NoOptionError: - logging.error("KeystoneToken: No admin_token found in %s" - % (self._keystone_config_file)) + logging.error( + "KeystoneToken: No admin_token found in " + f"{self._keystone_config_file}", + ) return None # There is no support for surrogateescape in Python 2, but we actually # have bytes in this case anyway. diff -Nru landscape-client-23.02/landscape/client/manager/manager.py landscape-client-23.08/landscape/client/manager/manager.py --- landscape-client-23.02/landscape/client/manager/manager.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/manager.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,5 @@ -from landscape.client.manager.store import ManagerStore from landscape.client.broker.client import BrokerClient +from landscape.client.manager.store import ManagerStore # Protocol messages! Same constants are defined in the server. FAILED = 5 @@ -12,7 +12,7 @@ name = "manager" def __init__(self, reactor, config): - super(Manager, self).__init__(reactor, config) + super().__init__(reactor, config) self.reactor = reactor self.config = config self.store = ManagerStore(self.config.store_filename) diff -Nru landscape-client-23.02/landscape/client/manager/packagemanager.py landscape-client-23.08/landscape/client/manager/packagemanager.py --- landscape-client-23.02/landscape/client/manager/packagemanager.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/packagemanager.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,14 +1,14 @@ import logging import os -from twisted.internet.utils import getProcessOutput from twisted.internet.defer import succeed +from twisted.internet.utils import getProcessOutput -from landscape.lib.encoding import encode_values -from landscape.lib.apt.package.store import PackageStore +from landscape.client.manager.plugin import ManagerPlugin from landscape.client.package.changer import PackageChanger from landscape.client.package.releaseupgrader import ReleaseUpgrader -from landscape.client.manager.plugin import ManagerPlugin +from landscape.lib.apt.package.store import PackageStore +from landscape.lib.encoding import encode_values class PackageManager(ManagerPlugin): @@ -17,20 +17,28 @@ _package_store = None def register(self, registry): - super(PackageManager, self).register(registry) + super().register(registry) self.config = registry.config if not self._package_store: - filename = os.path.join(registry.config.data_path, - "package/database") + filename = os.path.join( + registry.config.data_path, + "package/database", + ) self._package_store = PackageStore(filename) - registry.register_message("change-packages", - self.handle_change_packages) - registry.register_message("change-package-locks", - self.handle_change_package_locks) - registry.register_message("release-upgrade", - self.handle_release_upgrade) + registry.register_message( + "change-packages", + self.handle_change_packages, + ) + registry.register_message( + "change-package-locks", + self.handle_change_package_locks, + ) + registry.register_message( + "release-upgrade", + self.handle_release_upgrade, + ) # When the package reporter notifies us that something has changed, # we want to 
run again to see if we can now fulfill tasks that were
@@ -74,7 +82,12 @@
                 # path is set to None so that getProcessOutput does not
                 # chdir to "." see bug #211373
                 result = getProcessOutput(
-                    command, args=args, env=environ, errortoo=1, path=None)
+                    command,
+                    args=args,
+                    env=environ,
+                    errortoo=1,
+                    path=None,
+                )
                 result.addCallback(self._got_output, cls)
         else:
             result = succeed(None)
@@ -82,5 +95,6 @@

     def _got_output(self, output, cls):
         if output:
-            logging.warning("Package %s output:\n%s" %
-                            (cls.queue_name, output))
+            logging.warning(
+                f"Package {cls.queue_name} output:\n{output}",
+            )
diff -Nru landscape-client-23.02/landscape/client/manager/plugin.py landscape-client-23.08/landscape/client/manager/plugin.py
--- landscape-client-23.02/landscape/client/manager/plugin.py	2023-02-07 18:55:50.000000000 +0000
+++ landscape-client-23.08/landscape/client/manager/plugin.py	2023-08-17 21:19:32.000000000 +0000
@@ -1,8 +1,8 @@
 from twisted.internet.defer import maybeDeferred

+from landscape.client.broker.client import BrokerClientPlugin
 from landscape.lib.format import format_object
 from landscape.lib.log import log_failure
-from landscape.client.broker.client import BrokerClientPlugin

 # Protocol messages! Same constants are defined in the server.
 FAILED = 5
@@ -10,7 +10,6 @@


 class ManagerPlugin(BrokerClientPlugin):
-
     @property
     def manager(self):
         """An alias for the C{client} attribute."""
@@ -37,21 +36,30 @@
             return SUCCEEDED, text

         def failure(failure):
-            text = "%s: %s" % (failure.type.__name__, failure.value)
-            msg = ("Error occured running message handler %s with "
-                   "args %r %r.", format_object(callable), args, kwargs)
+            text = f"{failure.type.__name__}: {failure.value}"
+            msg = (
+                "Error occurred running message handler %s with args %r %r.",
+                format_object(callable),
+                args,
+                kwargs,
+            )
             log_failure(failure, msg=msg)
             return FAILED, text

         def send(args):
             status, text = args
-            result = {"type": "operation-result",
-                      "status": status,
-                      "operation-id": message["operation-id"]}
+            result = {
+                "type": "operation-result",
+                "status": status,
+                "operation-id": message["operation-id"],
+            }
             if text:
                 result["result-text"] = text
             return self.manager.broker.send_message(
-                result, self._session_id, urgent=True)
+                result,
+                self._session_id,
+                urgent=True,
+            )

         deferred.addCallback(success)
         deferred.addErrback(failure)
diff -Nru landscape-client-23.02/landscape/client/manager/processkiller.py landscape-client-23.08/landscape/client/manager/processkiller.py
--- landscape-client-23.02/landscape/client/manager/processkiller.py	2023-02-07 18:55:50.000000000 +0000
+++ landscape-client-23.08/landscape/client/manager/processkiller.py	2023-08-17 21:19:32.000000000 +0000
@@ -1,10 +1,10 @@
+import logging
 import os
 import signal
-import logging
 from datetime import datetime

-from landscape.lib.process import ProcessInformation
 from landscape.client.manager.plugin import ManagerPlugin
+from landscape.lib.process import ProcessInformation


 class ProcessNotFoundError(Exception):
@@ -31,24 +31,35 @@
         self.process_info = process_info

     def register(self, registry):
-        super(ProcessKiller, self).register(registry)
-        registry.register_message("signal-process",
-                                  self._handle_signal_process)
+        super().register(registry)
+        registry.register_message(
+            "signal-process",
+            self._handle_signal_process,
+        )

     def _handle_signal_process(self, message):
-        self.call_with_operation_result(message, self.signal_process,
-                                        message["pid"], message["name"],
-                                        message["start-time"],
-                                        message["signal"])
+        self.call_with_operation_result(
+ message, + self.signal_process, + message["pid"], + message["name"], + message["start-time"], + message["signal"], + ) def signal_process(self, pid, name, start_time, signame): - logging.info("Sending %s signal to the process with PID %d.", - signame, pid) + logging.info( + "Sending %s signal to the process with PID %d.", + signame, + pid, + ) process_info = self.process_info.get_process_info(pid) if not process_info: start_time = datetime.utcfromtimestamp(start_time) - message = ("The process %s with PID %d that started at %s UTC was " - "not found") % (name, pid, start_time) + message = ( + f"The process {name} with PID {pid:d} that started " + f"at {start_time} UTC was not found" + ) raise ProcessNotFoundError(message) elif abs(process_info["start-time"] - start_time) > 2: # We don't check that the start time matches precisely because @@ -56,18 +67,21 @@ # cascade into having imprecise process start times. expected_time = datetime.utcfromtimestamp(start_time) actual_time = datetime.utcfromtimestamp(process_info["start-time"]) - message = ("The process %s with PID %d that started at " - "%s UTC was not found. A process with the same " - "PID that started at %s UTC was found and not " - "sent the %s signal") % (name, pid, expected_time, - actual_time, signame) + message = ( + f"The process {name} with PID {pid:d} that started at " + f"{expected_time} UTC was not found. A process with the same " + f"PID that started at {actual_time} UTC was found and not " + f"sent the {signame} signal" + ) raise ProcessMismatchError(message) - signum = getattr(signal, "SIG%s" % (signame,)) + signum = getattr(signal, f"SIG{signame}") try: os.kill(pid, signum) except Exception: # XXX Nothing is indicating what the problem was. - message = ("Attempting to send the %s signal to the process " - "%s with PID %d failed") % (signame, name, pid) + message = ( + f"Attempting to send the {signame} signal to the process " + f"{name} with PID {pid:d} failed" + ) raise SignalProcessError(message) diff -Nru landscape-client-23.02/landscape/client/manager/scriptexecution.py landscape-client-23.08/landscape/client/manager/scriptexecution.py --- landscape-client-23.02/landscape/client/manager/scriptexecution.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/scriptexecution.py 2023-08-17 21:19:32.000000000 +0000 @@ -3,25 +3,30 @@ @var ALL_USERS: A token indicating all users should be allowed. 
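The module docstring above documents `ALL_USERS` as a token meaning no per-user restriction; in the code it is a unique sentinel object. A sketch of how such a sentinel is typically checked (the helper name here is hypothetical, not this module's API):

    ALL_USERS = object()  # unique sentinel, compared by identity

    def user_is_allowed(user, allowed_users):
        # When the sentinel itself is configured, every user passes;
        # otherwise fall back to plain membership.
        return allowed_users is ALL_USERS or user in allowed_users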
""" -import os -import sys import os.path -import tempfile import shutil +import sys +import tempfile -from twisted.internet.protocol import ProcessProtocol -from twisted.internet.defer import ( - Deferred, fail, inlineCallbacks, returnValue, succeed) +from twisted.internet.defer import Deferred +from twisted.internet.defer import fail +from twisted.internet.defer import inlineCallbacks +from twisted.internet.defer import returnValue +from twisted.internet.defer import succeed from twisted.internet.error import ProcessDone +from twisted.internet.protocol import ProcessProtocol from twisted.python.compat import unicode from landscape import VERSION +from landscape.client.manager.plugin import FAILED +from landscape.client.manager.plugin import ManagerPlugin +from landscape.client.manager.plugin import SUCCEEDED from landscape.constants import UBUNTU_PATH -from landscape.lib.fetch import fetch_async, HTTPCodeError +from landscape.lib.fetch import fetch_async +from landscape.lib.fetch import HTTPCodeError from landscape.lib.persist import Persist from landscape.lib.scriptcontent import build_script from landscape.lib.user import get_user_info -from landscape.client.manager.plugin import ManagerPlugin, SUCCEEDED, FAILED ALL_USERS = object() @@ -65,10 +70,10 @@ Exception.__init__(self, self._get_message()) def _get_message(self): - return "Unknown interpreter: '%s'" % self.interpreter + return f"Unknown interpreter: '{self.interpreter}'" -class ScriptRunnerMixin(object): +class ScriptRunnerMixin: """ @param process_factory: The L{IReactorProcess} provider to run the process with. @@ -108,46 +113,61 @@ gid = None env = { key: ( - value.encode(sys.getfilesystemencoding(), errors='replace') - if isinstance(value, unicode) else value + value.encode(sys.getfilesystemencoding(), errors="replace") + if isinstance(value, unicode) + else value ) for key, value in env.items() } pp = ProcessAccumulationProtocol( - self.registry.reactor, self.registry.config.script_output_limit, - self.truncation_indicator) + self.registry.reactor, + self.registry.config.script_output_limit, + self.truncation_indicator, + ) args = (filename,) self.process_factory.spawnProcess( - pp, filename, args=args, uid=uid, gid=gid, path=path, env=env) + pp, + filename, + args=args, + uid=uid, + gid=gid, + path=path, + env=env, + ) if time_limit is not None: pp.schedule_cancel(time_limit) return pp.result_deferred class ScriptExecutionPlugin(ManagerPlugin, ScriptRunnerMixin): - """A plugin which allows execution of arbitrary shell scripts. 
- - """ + """A plugin which allows execution of arbitrary shell scripts.""" def register(self, registry): - super(ScriptExecutionPlugin, self).register(registry) + super().register(registry) registry.register_message( - "execute-script", self._handle_execute_script) + "execute-script", + self._handle_execute_script, + ) def _respond(self, status, data, opid, result_code=None): if not isinstance(data, unicode): # Let's decode result-text, replacing non-printable # characters data = data.decode("utf-8", "replace") - message = {"type": "operation-result", - "status": status, - "result-text": data, - "operation-id": opid} + message = { + "type": "operation-result", + "status": status, + "result-text": data, + "operation-id": opid, + } if result_code: message["result-code"] = result_code return self.registry.broker.send_message( - message, self._session_id, True) + message, + self._session_id, + True, + ) def _handle_execute_script(self, message): opid = message["operation-id"] @@ -156,14 +176,19 @@ if not self.is_user_allowed(user): return self._respond( FAILED, - u"Scripts cannot be run as user %s." % (user,), - opid) + f"Scripts cannot be run as user {user}.", + opid, + ) server_supplied_env = message.get("env", None) - d = self.run_script(message["interpreter"], message["code"], - time_limit=message["time-limit"], user=user, - attachments=message["attachments"], - server_supplied_env=server_supplied_env) + d = self.run_script( + message["interpreter"], + message["code"], + time_limit=message["time-limit"], + user=user, + attachments=message["attachments"], + server_supplied_env=server_supplied_env, + ) d.addCallback(self._respond_success, opid) d.addErrback(self._respond_failure, opid) return d @@ -172,7 +197,7 @@ raise def _format_exception(self, e): - return u"%s: %s" % (e.__class__.__name__, e.args[0]) + return "{}: {}".format(e.__class__.__name__, e.args[0]) def _respond_success(self, data, opid): return self._respond(SUCCEEDED, data, opid) @@ -186,8 +211,11 @@ elif failure.check(HTTPCodeError): code = FETCH_ATTACHMENTS_FAILED_RESULT return self._respond( - FAILED, str(failure.value), opid, - FETCH_ATTACHMENTS_FAILED_RESULT) + FAILED, + str(failure.value), + opid, + FETCH_ATTACHMENTS_FAILED_RESULT, + ) if code is not None: return self._respond(FAILED, failure.value.data, opid, code) @@ -198,9 +226,11 @@ def _save_attachments(self, attachments, uid, gid, computer_id, env): root_path = self.registry.config.url.rsplit("/", 1)[0] + "/attachment/" env["LANDSCAPE_ATTACHMENTS"] = attachment_dir = tempfile.mkdtemp() - headers = {"User-Agent": "landscape-client/%s" % VERSION, - "Content-Type": "application/octet-stream", - "X-Computer-ID": computer_id} + headers = { + "User-Agent": f"landscape-client/{VERSION}", + "Content-Type": "application/octet-stream", + "X-Computer-ID": computer_id, + } for filename, attachment_id in attachments.items(): if isinstance(attachment_id, str): # Backward compatible behavior @@ -208,9 +238,10 @@ yield succeed(None) else: data = yield fetch_async( - "%s%d" % (root_path, attachment_id), + f"{root_path}{attachment_id:d}", cainfo=self.registry.config.ssl_public_key, - headers=headers) + headers=headers, + ) full_filename = os.path.join(attachment_dir, filename) with open(full_filename, "wb") as attachment: os.chmod(full_filename, 0o600) @@ -222,8 +253,15 @@ os.chown(attachment_dir, uid, gid) returnValue(attachment_dir) - def run_script(self, shell, code, user=None, time_limit=None, - attachments=None, server_supplied_env=None): + def run_script( + self, + shell, + 
code, + user=None, + time_limit=None, + attachments=None, + server_supplied_env=None, + ): """ Run a script based on a shell and the code. @@ -245,15 +283,13 @@ or fail with a L{ProcessTimeLimitReachedError}. """ if not os.path.exists(shell.split()[0]): - return fail( - UnknownInterpreterError(shell)) + return fail(UnknownInterpreterError(shell)) uid, gid, path = get_user_info(user) fd, filename = tempfile.mkstemp() script_file = os.fdopen(fd, "wb") - self.write_script_file( - script_file, filename, shell, code, uid, gid) + self.write_script_file(script_file, filename, shell, code, uid, gid) env = { "PATH": UBUNTU_PATH, @@ -269,8 +305,11 @@ if attachments: persist = Persist( - filename=os.path.join(self.registry.config.data_path, - "broker.bpickle")) + filename=os.path.join( + self.registry.config.data_path, + "broker.bpickle", + ), + ) persist = persist.root_at("registration") computer_id = persist.get("secure-id") try: @@ -283,8 +322,7 @@ def prepare_script(attachment_dir): - return self._run_script( - filename, uid, gid, path, env, time_limit) + return self._run_script(filename, uid, gid, path, env, time_limit) d.addCallback(prepare_script) return d.addBoth(self._cleanup, filename, env, old_umask) @@ -323,9 +361,11 @@ def schedule_cancel(self, time_limit): self._scheduled_cancel = self.reactor.call_later( - time_limit, self._cancel) + time_limit, + self._cancel, + ) - def childDataReceived(self, fd, data): + def childDataReceived(self, fd, data): # noqa: N802 """Some data was received from the child. Add it to our buffer, as long as it doesn't go over L{size_limit} @@ -334,14 +374,14 @@ if self._size < self.size_limit: data_length = len(data) if (self._size + data_length) >= self._truncated_size_limit: - extent = (self._truncated_size_limit - self._size) + extent = self._truncated_size_limit - self._size self.data.append(data[:extent] + self._truncation_indicator) self._size = self.size_limit else: self.data.append(data) self._size += data_length - def processEnded(self, reason): + def processEnded(self, reason): # noqa: N802 """Fire back the deferred. 
The deferred will be fired with the string of data received from the @@ -366,7 +406,8 @@ self.result_deferred.callback(data) else: self.result_deferred.errback( - ProcessFailedError(data, exit_code)) + ProcessFailedError(data, exit_code), + ) def _cancel(self): """ @@ -391,11 +432,12 @@ def __init__(self): from landscape.client.manager.customgraph import CustomGraphPlugin + self._script_execution = ScriptExecutionPlugin() self._custom_graph = CustomGraphPlugin() def register(self, registry): - super(ScriptExecution, self).register(registry) + super().register(registry) self._script_execution.register(registry) self._custom_graph.register(registry) diff -Nru landscape-client-23.02/landscape/client/manager/service.py landscape-client-23.08/landscape/client/manager/service.py --- landscape-client-23.02/landscape/client/manager/service.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/service.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,10 +1,11 @@ from twisted.python.reflect import namedClass -from landscape.client.service import LandscapeService, run_landscape_service -from landscape.client.manager.config import ManagerConfiguration -from landscape.client.broker.amp import RemoteBrokerConnector from landscape.client.amp import ComponentPublisher +from landscape.client.broker.amp import RemoteBrokerConnector +from landscape.client.manager.config import ManagerConfiguration from landscape.client.manager.manager import Manager +from landscape.client.service import LandscapeService +from landscape.client.service import run_landscape_service class ManagerService(LandscapeService): @@ -16,19 +17,26 @@ service_name = Manager.name def __init__(self, config): - super(ManagerService, self).__init__(config) + super().__init__(config) self.plugins = self.get_plugins() self.manager = Manager(self.reactor, self.config) - self.publisher = ComponentPublisher(self.manager, self.reactor, - self.config) + self.publisher = ComponentPublisher( + self.manager, + self.reactor, + self.config, + ) def get_plugins(self): """Return instances of all the plugins enabled in the configuration.""" - return [namedClass("landscape.client.manager.%s.%s" - % (plugin_name.lower(), plugin_name))() - for plugin_name in self.config.plugin_factories] + return [ + namedClass( + "landscape.client.manager." + f"{plugin_name.lower()}.{plugin_name}", + )() + for plugin_name in self.config.plugin_factories + ] - def startService(self): + def startService(self): # noqa: N802 """Start the manager service. This method does 3 things, in this order: @@ -37,7 +45,7 @@ - Connect to the broker. - Add all configured plugins, that will in turn register themselves. 
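# Illustrative sketch, not part of the upstream diff: get_plugins()
# above builds a dotted name per configured plugin and resolves it with
# twisted's namedClass, so "SnapManager" becomes
# landscape.client.manager.snapmanager.SnapManager. The helper below
# mirrors that construction; it only resolves where the landscape
# package is importable.
from twisted.python.reflect import namedClass


def load_manager_plugin(plugin_name):
    qualified = (
        "landscape.client.manager."
        f"{plugin_name.lower()}.{plugin_name}"
    )
    return namedClass(qualified)()  # plugins take no constructor args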
""" - super(ManagerService, self).startService() + super().startService() self.publisher.start() def start_plugins(broker): @@ -51,11 +59,11 @@ connected = self.connector.connect() return connected.addCallback(start_plugins) - def stopService(self): + def stopService(self): # noqa: N802 """Stop the manager and close the connection with the broker.""" self.connector.disconnect() deferred = self.publisher.stop() - super(ManagerService, self).stopService() + super().stopService() return deferred diff -Nru landscape-client-23.02/landscape/client/manager/shutdownmanager.py landscape-client-23.08/landscape/client/manager/shutdownmanager.py --- landscape-client-23.02/landscape/client/manager/shutdownmanager.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/shutdownmanager.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,10 +1,12 @@ import logging from twisted.internet.defer import Deferred -from twisted.internet.protocol import ProcessProtocol from twisted.internet.error import ProcessDone +from twisted.internet.protocol import ProcessProtocol -from landscape.client.manager.plugin import ManagerPlugin, SUCCEEDED, FAILED +from landscape.client.manager.plugin import FAILED +from landscape.client.manager.plugin import ManagerPlugin +from landscape.client.manager.plugin import SUCCEEDED class ShutdownFailedError(Exception): @@ -18,7 +20,6 @@ class ShutdownManager(ManagerPlugin): - def __init__(self, process_factory=None): if process_factory is None: from twisted.internet import reactor as process_factory @@ -30,7 +31,7 @@ The shutdown manager handles C{shutdown} activity messages broadcast from the server. """ - super(ShutdownManager, self).register(registry) + super().register(registry) registry.register_message("shutdown", self.perform_shutdown) def perform_shutdown(self, message): @@ -54,22 +55,24 @@ deferred = self._respond(SUCCEEDED, data, operation_id) # After sending the result to the server, stop accepting messages and # wait for the reboot/shutdown. - deferred.addCallback( - lambda _: self.registry.broker.stop_exchanger()) + deferred.addCallback(lambda _: self.registry.broker.stop_exchanger()) return deferred def _respond_failure(self, failure, operation_id, reboot): logging.info("Shutdown request failed.") - failure_report = '\n'.join([ - failure.value.data, - "", - "Attempting to force {operation}. Please note that if this " - "succeeds, Landscape will have no way of knowing and will still " - "mark this activity as having failed. It is recommended you check " - "the state of the machine manually to determine whether " - "{operation} succeeded.".format( - operation="reboot" if reboot else "shutdown") - ]) + failure_report = "\n".join( + [ + failure.value.data, + "", + "Attempting to force {operation}. Please note that if this " + "succeeds, Landscape will have no way of knowing and will " + "still mark this activity as having failed. 
It is recommended " + "you check the state of the machine manually to determine " + "whether {operation} succeeded.".format( + operation="reboot" if reboot else "shutdown", + ), + ], + ) deferred = self._respond(FAILED, failure_report, operation_id) # Add another callback spawning the poweroff or reboot command (which # seem more reliable in aberrant situations like a post-trusty release @@ -81,30 +84,45 @@ command, args = self._get_command_and_args(protocol, reboot, True) deferred.addCallback( lambda _: self._process_factory.spawnProcess( - protocol, command, args=args)) + protocol, + command, + args=args, + ), + ) return deferred def _respond(self, status, data, operation_id): - message = {"type": "operation-result", - "status": status, - "result-text": data, - "operation-id": operation_id} + message = { + "type": "operation-result", + "status": status, + "result-text": data, + "operation-id": operation_id, + } return self.registry.broker.send_message( - message, self._session_id, True) + message, + self._session_id, + True, + ) def _get_command_and_args(self, protocol, reboot, force=False): """ Returns a C{command, args} 2-tuple suitable for use with L{IReactorProcess.spawnProcess}. """ - minutes = None if force else "+%d" % (protocol.delay // 60,) + minutes = None if force else f"+{protocol.delay//60:d}" args = { (False, False): [ - "/sbin/shutdown", "-h", minutes, - "Landscape is shutting down the system"], + "/sbin/shutdown", + "-h", + minutes, + "Landscape is shutting down the system", + ], (False, True): [ - "/sbin/shutdown", "-r", minutes, - "Landscape is rebooting the system"], + "/sbin/shutdown", + "-r", + minutes, + "Landscape is rebooting the system", + ], (True, False): ["/sbin/poweroff"], (True, True): ["/sbin/reboot"], }[force, reboot] @@ -148,7 +166,7 @@ """ reactor.call_later(timeout, self._succeed) - def childDataReceived(self, fd, data): + def childDataReceived(self, fd, data): # noqa: N802 """Some data was received from the child. Add it to our buffer to pass to C{result} when it's fired. @@ -156,7 +174,7 @@ if self._waiting: self._data.append(data) - def processEnded(self, reason): + def processEnded(self, reason): # noqa: N802 """Fire back the C{result} L{Deferred}. C{result}'s callback will be fired with the string of data received diff -Nru landscape-client-23.02/landscape/client/manager/snapmanager.py landscape-client-23.08/landscape/client/manager/snapmanager.py --- landscape-client-23.02/landscape/client/manager/snapmanager.py 1970-01-01 00:00:00.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/snapmanager.py 2023-08-17 21:19:32.000000000 +0000 @@ -0,0 +1,271 @@ +import logging +from collections import deque + +from twisted.internet import task + +from landscape.client.manager.plugin import FAILED +from landscape.client.manager.plugin import ManagerPlugin +from landscape.client.manager.plugin import SUCCEEDED +from landscape.client.snap.http import INCOMPLETE_STATUSES +from landscape.client.snap.http import SnapdHttpException +from landscape.client.snap.http import SnapHttp +from landscape.client.snap.http import SUCCESS_STATUSES + + +class SnapManager(ManagerPlugin): + """ + Plugin that updates the state of snaps on this machine, installing, + removing, refreshing, enabling, and disabling them in response to messages. + + Changes trigger SnapMonitor to send an updated state message immediately. 
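# Illustrative sketch, not part of the upstream diff: how a snap message
# type maps onto the SNAP_METHODS table defined just below. Per-snap
# arguments force one snapd call per snap; otherwise the single "-batch"
# variant handles every snap at once. The stub client is an assumption
# standing in for the real SnapHttp API.
class StubSnapHttp:
    def install_snap(self, name, **kwargs):
        return {"change": "1"}

    def install_snaps(self, names, **kwargs):
        return {"change": "2"}


def dispatch(message, snap_methods):
    snaps = message["snaps"]
    if snaps and any(len(s) > 1 for s in snaps):
        # A key besides "name" means per-snap args: call snapd per snap.
        return [
            snap_methods[message["type"]](s["name"], **s.get("args", {}))
            for s in snaps
        ]
    return snap_methods[f"{message['type']}-batch"](
        [s["name"] for s in snaps],
    )


stub = StubSnapHttp()
methods = {
    "install-snaps": stub.install_snap,
    "install-snaps-batch": stub.install_snaps,
}
print(dispatch({"type": "install-snaps", "snaps": [{"name": "hello"}]}))
# Prints {'change': '2'}: only "name" keys, so the batch path is taken.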
+ """ + + def __init__(self): + super().__init__() + + self._snap_http = SnapHttp() + self.SNAP_METHODS = { + "install-snaps": self._snap_http.install_snap, + "install-snaps-batch": self._snap_http.install_snaps, + "remove-snaps": self._snap_http.remove_snap, + "remove-snaps-batch": self._snap_http.remove_snaps, + "refresh-snaps": self._snap_http.refresh_snap, + "refresh-snaps-batch": self._snap_http.refresh_snaps, + "hold-snaps": self._snap_http.hold_snap, + "hold-snaps-batch": self._snap_http.hold_snaps, + "unhold-snaps": self._snap_http.unhold_snap, + "unhold-snaps-batch": self._snap_http.unhold_snaps, + } + + def register(self, registry): + super().register(registry) + self.config = registry.config + + registry.register_message("install-snaps", self._handle_snap_task) + registry.register_message("remove-snaps", self._handle_snap_task) + registry.register_message("refresh-snaps", self._handle_snap_task) + registry.register_message("hold-snaps", self._handle_snap_task) + registry.register_message("unhold-snaps", self._handle_snap_task) + + def _handle_snap_task(self, message): + """ + If there are no per-snap arguments for the targeted + snaps, often the task can be done with a single snapd call, if + we have a handler for the action type, which we call via a kind + of dynamic dispatch. + """ + snaps = message["snaps"] + + if snaps and any(len(s) > 1 for s in snaps): + # "name" key only means no per-snap args. + return self._handle_multiple_snap_tasks(message) + + if f"{message['type']}-batch" not in self.SNAP_METHODS: + return self._handle_multiple_snap_tasks(message) + + return self._handle_batch_snap_task(message) + + def _handle_batch_snap_task(self, message): + logging.debug( + f"Handling message {message} as a single batch snap task", + ) + message_type = message["type"] + snaps = [s["name"] for s in message["snaps"]] + snap_args = message.get("args", {}) + opid = message["operation-id"] + errors = {} + queue = deque() + + logging.info(f"Performing {message_type} action for snaps {snaps}") + + try: + response = self._start_snap_task( + message_type + "-batch", + snaps, + **snap_args, + ) + queue.append((response["change"], "BATCH")) + except SnapdHttpException as e: + result = e.json["result"] + logging.error( + f"Error in {message_type}: {message}", + ) + errors["BATCH"] = result + + deferred = self._check_statuses(queue) + deferred.addCallback(self._respond, opid, errors) + + return deferred + + def _handle_multiple_snap_tasks(self, message): + """ + Performs a generic task, `snap_method`, on a group of snaps + where each task must be performed independently per-snap. + + This is required when we want to provide metadata for refreshes + or installs and also specify the channel, revision, or other + arguments per-snap. + """ + logging.debug(f"Handling message {message} as multiple snap tasks") + message_type = message["type"] + snaps = message["snaps"] + opid = message["operation-id"] + errors = {} + queue = deque() + + logging.info(f"Performing {message_type} action for snaps {snaps}") + + # Naively doing this synchronously because each is an HTTP call to the + # snap REST API that returns basically immediately. We poll for their + # completion statuses once they've all been kicked off. 
+ for snap in snaps: + name = snap["name"] + snap_args = snap.get("args", {}) + + try: + response = self._start_snap_task( + message_type, + name, + **snap_args, + ) + queue.append((response["change"], name)) + except SnapdHttpException as e: + result = e.json["result"] + logging.error( + f"Error in {message_type} for '{name}': {message}", + ) + errors[name] = result + + deferred = self._check_statuses(queue) + deferred.addCallback(self._respond, opid, errors) + + return deferred + + def _check_statuses(self, change_queue): + """ + Repeatedly polls for the status of each change in `change_queue` + until all are no longer in-progress. + """ + completed_changes = [] + interval = getattr(self.registry.config, "snapd_poll_interval", 15) + + def get_status(): + """ + Looping function that polls snapd for the status of + changes, moving them from the queue when they are done. + """ + if not change_queue: + loop.stop() + return + + logging.info("Polling snapd for status of pending snap changes") + + try: + result = self._snap_http.check_changes().get("result", []) + result_dict = {c["id"]: c for c in result} + except SnapdHttpException as e: + logging.error(f"Error checking status of snap changes: {e}") + completed_changes.extend( + [(name, str(e)) for _, name in change_queue], + ) + loop.stop() + return + + for _ in range(len(change_queue)): + cid, name = change_queue.popleft() + + # It's possible (though unlikely) that a change is not in the + # list - snapd could have dropped it for some reason. We need + # to know if that happens, hence this check. + if cid not in result_dict: + completed_changes.append((name, "Unknown")) + continue + + status = result_dict[cid]["status"] + if status in INCOMPLETE_STATUSES: + logging.info( + f"Incomplete status for {name}, waiting...", + ) + change_queue.append((cid, name)) + else: + logging.info(f"Complete status for {name}") + completed_changes.append((name, status)) + + loop = task.LoopingCall(get_status) + loopDeferred = loop.start(interval) + + return loopDeferred.addCallback(lambda _: completed_changes) + + def _start_snap_task(self, action, *args, **kwargs): + """ + Kicks off the appropriate SNAP_METHOD for `action`. + + raises a `SnapdHttpException` in the event of issues. + """ + snap_method = self.SNAP_METHODS[action] + + response = snap_method(*args, **kwargs) + + if "change" not in response: + raise SnapdHttpException(response) + + return response + + def _respond(self, snap_results, opid, errors): + """ + Queues a response to Landscape Server based on the contents of + `results`. + + `completed` and `errored` are lists of snapd change ids. + Text error messages are stored in `errors`. + """ + logging.debug(f"Preparing snap change done response: {snap_results}") + + results = { + "completed": [], + "errored": [], + "errors": errors, + } + + for name, status in snap_results: + if status not in SUCCESS_STATUSES: + results["errored"].append(name) + results["errors"][name] = status + else: + results["completed"].append(name) + + message = { + "type": "operation-result", + "status": FAILED if results["errored"] or errors else SUCCEEDED, + "result-text": str(results), + "operation-id": opid, + } + + logging.debug("Sending snap-action-done response") + + # Kick off an immediate SnapMonitor message as well. 
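# Illustrative sketch, not part of the upstream diff: the polling shape
# of _check_statuses above. A twisted LoopingCall drains a deque of
# (change_id, snap_name) pairs, re-queueing changes snapd still reports
# as in progress; fetch_statuses and the in-progress status strings are
# assumptions standing in for SnapHttp.check_changes() and
# INCOMPLETE_STATUSES.
from twisted.internet import task


def poll_changes(fetch_statuses, queue, interval=15):
    # queue: a collections.deque of (change_id, snap_name) pairs.
    completed = []

    def step():
        if not queue:
            loop.stop()
            return
        statuses = fetch_statuses()  # maps change id -> status string
        for _ in range(len(queue)):
            cid, name = queue.popleft()
            status = statuses.get(cid, "Unknown")
            if status in ("Do", "Doing", "Wait"):  # assumed incomplete
                queue.append((cid, name))
            else:
                completed.append((name, status))

    loop = task.LoopingCall(step)
    deferred = loop.start(interval)
    return deferred.addCallback(lambda _: completed)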
+ self._send_installed_snap_update() + return self.registry.broker.send_message( + message, + self._session_id, + True, + ) + + def _send_installed_snap_update(self): + try: + installed_snaps = self._snap_http.get_snaps() + except SnapdHttpException as e: + logging.error( + f"Unable to list installed snaps after snap change: {e}", + ) + return + + if installed_snaps: + return self.registry.broker.send_message( + { + "type": "snaps", + "snaps": installed_snaps, + }, + self._session_id, + True, + ) diff -Nru landscape-client-23.02/landscape/client/manager/store.py landscape-client-23.08/landscape/client/manager/store.py --- landscape-client-23.02/landscape/client/manager/store.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/store.py 2023-08-17 21:19:32.000000000 +0000 @@ -6,8 +6,7 @@ from landscape.lib.apt.package.store import with_cursor -class ManagerStore(object): - +class ManagerStore: def __init__(self, filename): self._db = sqlite3.connect(filename) ensure_schema(self._db) @@ -16,7 +15,8 @@ def get_graph(self, cursor, graph_id): cursor.execute( "SELECT graph_id, filename, user FROM graph WHERE graph_id=?", - (graph_id,)) + (graph_id,), + ) return cursor.fetchone() @with_cursor @@ -28,16 +28,19 @@ def add_graph(self, cursor, graph_id, filename, user): cursor.execute( "SELECT graph_id FROM graph WHERE graph_id=?", - (graph_id,)) + (graph_id,), + ) if cursor.fetchone(): cursor.execute( "UPDATE graph SET filename=?, user=? WHERE graph_id=?", - (filename, user, graph_id)) + (filename, user, graph_id), + ) else: cursor.execute( "INSERT INTO graph (graph_id, filename, user) " "VALUES (?, ?, ?)", - (graph_id, filename, user)) + (graph_id, filename, user), + ) @with_cursor def remove_graph(self, cursor, graph_id): @@ -47,35 +50,46 @@ def set_graph_accumulate(self, cursor, graph_id, timestamp, value): cursor.execute( "SELECT graph_id, graph_timestamp, graph_value FROM " - "graph_accumulate WHERE graph_id=?", (graph_id,)) + "graph_accumulate WHERE graph_id=?", + (graph_id,), + ) graph_accumulate = cursor.fetchone() if graph_accumulate: cursor.execute( "UPDATE graph_accumulate SET graph_timestamp = ?, " "graph_value = ? 
WHERE graph_id=?", - (timestamp, value, graph_id)) + (timestamp, value, graph_id), + ) else: cursor.execute( "INSERT INTO graph_accumulate (graph_id, graph_timestamp, " - "graph_value) VALUES (?, ?, ?)", (graph_id, timestamp, value)) + "graph_value) VALUES (?, ?, ?)", + (graph_id, timestamp, value), + ) @with_cursor def get_graph_accumulate(self, cursor, graph_id): cursor.execute( "SELECT graph_id, graph_timestamp, graph_value FROM " - "graph_accumulate WHERE graph_id=?", (graph_id,)) + "graph_accumulate WHERE graph_id=?", + (graph_id,), + ) return cursor.fetchone() def ensure_schema(db): cursor = db.cursor() try: - cursor.execute("CREATE TABLE graph" - " (graph_id INTEGER PRIMARY KEY," - " filename TEXT NOT NULL, user TEXT)") - cursor.execute("CREATE TABLE graph_accumulate" - " (graph_id INTEGER PRIMARY KEY," - " graph_timestamp INTEGER, graph_value FLOAT)") + cursor.execute( + "CREATE TABLE graph" + " (graph_id INTEGER PRIMARY KEY," + " filename TEXT NOT NULL, user TEXT)", + ) + cursor.execute( + "CREATE TABLE graph_accumulate" + " (graph_id INTEGER PRIMARY KEY," + " graph_timestamp INTEGER, graph_value FLOAT)", + ) except sqlite3.OperationalError: cursor.close() db.rollback() diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_aptsources.py landscape-client-23.08/landscape/client/manager/tests/test_aptsources.py --- landscape-client-23.02/landscape/client/manager/tests/test_aptsources.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_aptsources.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,18 +1,18 @@ import os +from unittest import mock -import mock - -from twisted.internet.defer import Deferred, succeed +from twisted.internet.defer import Deferred +from twisted.internet.defer import succeed from landscape.client.manager.aptsources import AptSources -from landscape.client.manager.plugin import SUCCEEDED, FAILED - -from landscape.client.tests.helpers import LandscapeTest, ManagerHelper +from landscape.client.manager.plugin import FAILED +from landscape.client.manager.plugin import SUCCEEDED from landscape.client.package.reporter import find_reporter_command +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import ManagerHelper -class FakeStatResult(object): - +class FakeStatResult: def __init__(self, st_mode, st_uid, st_gid): self.st_mode = st_mode self.st_uid = st_uid @@ -23,11 +23,13 @@ helpers = [ManagerHelper] def setUp(self): - super(AptSourcesTests, self).setUp() + super().setUp() self.sourceslist = AptSources() self.sources_path = self.makeDir() - self.sourceslist.SOURCES_LIST = os.path.join(self.sources_path, - "sources.list") + self.sourceslist.SOURCES_LIST = os.path.join( + self.sources_path, + "sources.list", + ) sources_d = os.path.join(self.sources_path, "sources.list.d") os.mkdir(sources_d) self.sourceslist.SOURCES_LIST_D = sources_d @@ -51,16 +53,21 @@ sources.write("oki\n\ndoki\n#comment\n # other comment\n") self.manager.dispatch_message( - {"type": "apt-sources-replace", - "sources": [{"name": "bla", "content": b""}], - "gpg-keys": [], - "operation-id": 1}) + { + "type": "apt-sources-replace", + "sources": [{"name": "bla", "content": b""}], + "gpg-keys": [], + "operation-id": 1, + }, + ) with open(self.sourceslist.SOURCES_LIST) as sources: self.assertEqual( "# Landscape manages repositories for this computer\n" "# Original content of sources.list can be found in " - "sources.list.save\n", sources.read()) + "sources.list.save\n", + sources.read(), + ) def 
test_save_sources_list(self): """ @@ -71,16 +78,21 @@ sources.write("oki\n\ndoki\n#comment\n # other comment\n") self.manager.dispatch_message( - {"type": "apt-sources-replace", - "sources": [{"name": "bla", "content": b""}], - "gpg-keys": [], - "operation-id": 1}) + { + "type": "apt-sources-replace", + "sources": [{"name": "bla", "content": b""}], + "gpg-keys": [], + "operation-id": 1, + }, + ) - saved_sources_path = "{}.save".format(self.sourceslist.SOURCES_LIST) + saved_sources_path = f"{self.sourceslist.SOURCES_LIST}.save" self.assertTrue(os.path.exists(saved_sources_path)) with open(saved_sources_path) as saved_sources: - self.assertEqual("oki\n\ndoki\n#comment\n # other comment\n", - saved_sources.read()) + self.assertEqual( + "oki\n\ndoki\n#comment\n # other comment\n", + saved_sources.read(), + ) def test_existing_saved_sources_list(self): """ @@ -90,15 +102,18 @@ with open(self.sourceslist.SOURCES_LIST, "w") as sources: sources.write("oki\n\ndoki\n#comment\n # other comment\n") - saved_sources_path = "{}.save".format(self.sourceslist.SOURCES_LIST) + saved_sources_path = f"{self.sourceslist.SOURCES_LIST}.save" with open(saved_sources_path, "w") as saved_sources: saved_sources.write("original content\n") self.manager.dispatch_message( - {"type": "apt-sources-replace", - "sources": [{"name": "bla", "content": b""}], - "gpg-keys": [], - "operation-id": 1}) + { + "type": "apt-sources-replace", + "sources": [{"name": "bla", "content": b""}], + "gpg-keys": [], + "operation-id": 1, + }, + ) self.assertTrue(os.path.exists(saved_sources_path)) with open(saved_sources_path) as saved_sources: @@ -110,13 +125,18 @@ unicode content correctly. """ self.manager.dispatch_message( - {"type": "apt-sources-replace", - "sources": [{"name": "bla", "content": u"fancy content"}], - "gpg-keys": [], - "operation-id": 1}) + { + "type": "apt-sources-replace", + "sources": [{"name": "bla", "content": "fancy content"}], + "gpg-keys": [], + "operation-id": 1, + }, + ) saved_sources_path = os.path.join( - self.sourceslist.SOURCES_LIST_D, "landscape-bla.list") + self.sourceslist.SOURCES_LIST_D, + "landscape-bla.list", + ) self.assertTrue(os.path.exists(saved_sources_path)) with open(saved_sources_path, "rb") as saved_sources: self.assertEqual(b"fancy content", saved_sources.read()) @@ -126,7 +146,7 @@ When getting a repository message without sources, AptSources restores the previous contents of the sources.list file. 
""" - saved_sources_path = "{}.save".format(self.sourceslist.SOURCES_LIST) + saved_sources_path = f"{self.sourceslist.SOURCES_LIST}.save" with open(saved_sources_path, "w") as old_sources: old_sources.write("original content\n") @@ -134,10 +154,13 @@ sources.write("oki\n\ndoki\n#comment\n # other comment\n") self.manager.dispatch_message( - {"type": "apt-sources-replace", - "sources": [], - "gpg-keys": [], - "operation-id": 1}) + { + "type": "apt-sources-replace", + "sources": [], + "gpg-keys": [], + "operation-id": 1, + }, + ) with open(self.sourceslist.SOURCES_LIST) as sources: self.assertEqual("original content\n", sources.read()) @@ -154,8 +177,11 @@ os.chmod(self.sourceslist.SOURCES_LIST, 0o400) sources_stat_orig = os.stat(self.sourceslist.SOURCES_LIST) - fake_stats = FakeStatResult(st_mode=sources_stat_orig.st_mode, - st_uid=30, st_gid=30) + fake_stats = FakeStatResult( + st_mode=sources_stat_orig.st_mode, + st_uid=30, + st_gid=30, + ) orig_stat = os.stat @@ -168,99 +194,202 @@ _mock_chown = mock.patch("os.chown") with _mock_stat as mock_stat, _mock_chown as mock_chown: self.manager.dispatch_message( - {"type": "apt-sources-replace", - "sources": [{"name": "bla", "content": b""}], - "gpg-keys": [], - "operation-id": 1}) + { + "type": "apt-sources-replace", + "sources": [{"name": "bla", "content": b""}], + "gpg-keys": [], + "operation-id": 1, + }, + ) service = self.broker_service - self.assertMessages(service.message_store.get_pending_messages(), - [{"type": "operation-result", - "status": SUCCEEDED, "operation-id": 1}]) + self.assertMessages( + service.message_store.get_pending_messages(), + [ + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 1, + }, + ], + ) mock_stat.assert_any_call(self.sourceslist.SOURCES_LIST) mock_chown.assert_any_call( - self.sourceslist.SOURCES_LIST, fake_stats.st_uid, - fake_stats.st_gid) + self.sourceslist.SOURCES_LIST, + fake_stats.st_uid, + fake_stats.st_gid, + ) sources_stat_after = os.stat(self.sourceslist.SOURCES_LIST) - self.assertEqual( - sources_stat_orig.st_mode, sources_stat_after.st_mode) + self.assertEqual(sources_stat_orig.st_mode, sources_stat_after.st_mode) def test_random_failures(self): """ If a failure happens during the manipulation of sources, the activity is reported as FAILED with the error message. """ + def buggy_source_handler(*args): raise RuntimeError("foo") self.sourceslist._handle_sources = buggy_source_handler self.manager.dispatch_message( - {"type": "apt-sources-replace", - "sources": [{"name": "bla", "content": b""}], - "gpg-keys": [], - "operation-id": 1}) + { + "type": "apt-sources-replace", + "sources": [{"name": "bla", "content": b""}], + "gpg-keys": [], + "operation-id": 1, + }, + ) msg = "RuntimeError: foo" service = self.broker_service - self.assertMessages(service.message_store.get_pending_messages(), - [{"type": "operation-result", - "result-text": msg, "status": FAILED, - "operation-id": 1}]) + self.assertMessages( + service.message_store.get_pending_messages(), + [ + { + "type": "operation-result", + "result-text": msg, + "status": FAILED, + "operation-id": 1, + }, + ], + ) - def test_rename_sources_list_d(self): + def test_renames_sources_list_d(self): """ The sources files in sources.list.d are renamed to .save when a message - is received. + is received if config says to manage them, which is the default. 
""" - with open(os.path.join(self.sourceslist.SOURCES_LIST_D, "file1.list"), - "w") as sources1: + with open( + os.path.join(self.sourceslist.SOURCES_LIST_D, "file1.list"), + "w", + ) as sources1: sources1.write("ok\n") - with open(os.path.join(self.sourceslist.SOURCES_LIST_D, - "file2.list.save"), "w") as sources2: + with open( + os.path.join(self.sourceslist.SOURCES_LIST_D, "file2.list.save"), + "w", + ) as sources2: sources2.write("ok\n") self.manager.dispatch_message( - {"type": "apt-sources-replace", "sources": [], "gpg-keys": [], - "operation-id": 1}) + { + "type": "apt-sources-replace", + "sources": [], + "gpg-keys": [], + "operation-id": 1, + }, + ) self.assertFalse( os.path.exists( - os.path.join(self.sourceslist.SOURCES_LIST_D, "file1.list"))) + os.path.join(self.sourceslist.SOURCES_LIST_D, "file1.list"), + ), + ) self.assertTrue( os.path.exists( - os.path.join(self.sourceslist.SOURCES_LIST_D, - "file1.list.save"))) + os.path.join( + self.sourceslist.SOURCES_LIST_D, + "file1.list.save", + ), + ), + ) self.assertTrue( os.path.exists( - os.path.join(self.sourceslist.SOURCES_LIST_D, - "file2.list.save"))) + os.path.join( + self.sourceslist.SOURCES_LIST_D, + "file2.list.save", + ), + ), + ) + + def test_does_not_rename_sources_list_d(self): + """ + The sources files in sources.list.d are not renamed to .save when a + message is received if config says not to manage them. + """ + with open( + os.path.join(self.sourceslist.SOURCES_LIST_D, "file1.list"), + "w", + ) as sources1: + sources1.write("ok\n") + + with open( + os.path.join(self.sourceslist.SOURCES_LIST_D, "file2.list.save"), + "w", + ) as sources2: + sources2.write("ok\n") + + self.manager.config.manage_sources_list_d = False + self.manager.dispatch_message( + { + "type": "apt-sources-replace", + "sources": [], + "gpg-keys": [], + "operation-id": 1, + }, + ) + + self.assertTrue( + os.path.exists( + os.path.join(self.sourceslist.SOURCES_LIST_D, "file1.list"), + ), + ) + + self.assertFalse( + os.path.exists( + os.path.join( + self.sourceslist.SOURCES_LIST_D, + "file1.list.save", + ), + ), + ) + + self.assertTrue( + os.path.exists( + os.path.join( + self.sourceslist.SOURCES_LIST_D, + "file2.list.save", + ), + ), + ) def test_create_landscape_sources(self): """ For every sources listed in the sources field of the message, C{AptSources} creates a file with the content in sources.list.d. 
""" - sources = [{"name": "dev", "content": b"oki\n"}, - {"name": "lucid", "content": b"doki\n"}] + sources = [ + {"name": "dev", "content": b"oki\n"}, + {"name": "lucid", "content": b"doki\n"}, + ] self.manager.dispatch_message( - {"type": "apt-sources-replace", "sources": sources, "gpg-keys": [], - "operation-id": 1}) - - dev_file = os.path.join(self.sourceslist.SOURCES_LIST_D, - "landscape-dev.list") + { + "type": "apt-sources-replace", + "sources": sources, + "gpg-keys": [], + "operation-id": 1, + }, + ) + + dev_file = os.path.join( + self.sourceslist.SOURCES_LIST_D, + "landscape-dev.list", + ) self.assertTrue(os.path.exists(dev_file)) with open(dev_file) as file: result = file.read() self.assertEqual("oki\n", result) - lucid_file = os.path.join(self.sourceslist.SOURCES_LIST_D, - "landscape-lucid.list") + lucid_file = os.path.join( + self.sourceslist.SOURCES_LIST_D, + "landscape-lucid.list", + ) self.assertTrue(os.path.exists(lucid_file)) with open(lucid_file) as file: result = file.read() @@ -276,15 +405,19 @@ gpg_keys = ["key1", "key2"] self.manager.dispatch_message( - {"type": "apt-sources-replace", "sources": [], - "gpg-keys": gpg_keys, - "operation-id": 1}) + { + "type": "apt-sources-replace", + "sources": [], + "gpg-keys": gpg_keys, + "operation-id": 1, + }, + ) keys = [] gpg_dirpath = self.sourceslist.TRUSTED_GPG_D for filename in os.listdir(gpg_dirpath): filepath = os.path.join(gpg_dirpath, filename) - with open(filepath, 'r') as fh: + with open(filepath, "r") as fh: keys.append(fh.read()) self.assertCountEqual(keys, gpg_keys) @@ -298,16 +431,28 @@ def _run_process(command, args, env={}, path=None, uid=None, gid=None): self.assertEqual( - find_reporter_command(self.manager.config), command) - self.assertEqual(["--force-apt-update", "--config=%s" % - self.manager.config.config], args) + find_reporter_command(self.manager.config), + command, + ) + self.assertEqual( + [ + "--force-apt-update", + f"--config={self.manager.config.config}", + ], + args, + ) deferred.callback(("ok", "", 0)) return deferred self.sourceslist._run_process = _run_process self.manager.dispatch_message( - {"type": "apt-sources-replace", "sources": [], "gpg-keys": [], - "operation-id": 1}) + { + "type": "apt-sources-replace", + "sources": [], + "gpg-keys": [], + "operation-id": 1, + }, + ) return deferred diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_config.py landscape-client-23.08/landscape/client/manager/tests/test_config.py --- landscape-client-23.02/landscape/client/manager/tests/test_config.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_config.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,20 +1,29 @@ -from landscape.client.tests.helpers import LandscapeTest -from landscape.client.manager.config import ManagerConfiguration, ALL_PLUGINS +from landscape.client.manager.config import ALL_PLUGINS +from landscape.client.manager.config import ManagerConfiguration from landscape.client.manager.scriptexecution import ALL_USERS +from landscape.client.tests.helpers import LandscapeTest class ManagerConfigurationTest(LandscapeTest): - def setUp(self): - super(ManagerConfigurationTest, self).setUp() + super().setUp() self.config = ManagerConfiguration() def test_plugin_factories(self): """By default all plugins are enabled.""" - self.assertEqual(["ProcessKiller", "PackageManager", "UserManager", - "ShutdownManager", "AptSources", "HardwareInfo", - "KeystoneToken"], - ALL_PLUGINS) + self.assertEqual( + [ + "ProcessKiller", + "PackageManager", 
+ "UserManager", + "ShutdownManager", + "AptSources", + "HardwareInfo", + "KeystoneToken", + "SnapManager", + ], + ALL_PLUGINS, + ) self.assertEqual(ALL_PLUGINS, self.config.plugin_factories) def test_plugin_factories_with_manager_plugins(self): @@ -31,9 +40,11 @@ command line option. """ self.config.load(["--include-manager-plugins", "ScriptExecution"]) - self.assertEqual(len(self.config.plugin_factories), - len(ALL_PLUGINS) + 1) - self.assertTrue('ScriptExecution' in self.config.plugin_factories) + self.assertEqual( + len(self.config.plugin_factories), + len(ALL_PLUGINS) + 1, + ) + self.assertTrue("ScriptExecution" in self.config.plugin_factories) def test_get_allowed_script_users(self): """ @@ -55,5 +66,7 @@ as. """ self.config.load(["--script-users", "foo, bar,baz"]) - self.assertEqual(self.config.get_allowed_script_users(), - ["foo", "bar", "baz"]) + self.assertEqual( + self.config.get_allowed_script_users(), + ["foo", "bar", "baz"], + ) diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_customgraph.py landscape-client-23.08/landscape/client/manager/tests/test_customgraph.py --- landscape-client-23.02/landscape/client/manager/tests/test_customgraph.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_customgraph.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,17 +1,17 @@ +import logging import os import pwd -import logging - -import mock +from unittest import mock from twisted.internet.error import ProcessDone from twisted.python.failure import Failure from landscape.client.manager.customgraph import CustomGraphPlugin from landscape.client.manager.store import ManagerStore - -from landscape.lib.testing import StubProcessFactory, DummyProcess -from landscape.client.tests.helpers import LandscapeTest, ManagerHelper +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import ManagerHelper +from landscape.lib.testing import DummyProcess +from landscape.lib.testing import StubProcessFactory class CustomGraphManagerTests(LandscapeTest): @@ -19,17 +19,17 @@ helpers = [ManagerHelper] def setUp(self): - super(CustomGraphManagerTests, self).setUp() + super().setUp() self.store = ManagerStore(":memory:") self.manager.store = self.store - self.broker_service.message_store.set_accepted_types( - ["custom-graph"]) + self.broker_service.message_store.set_accepted_types(["custom-graph"]) self.data_path = self.makeDir() self.manager.config.data_path = self.data_path os.makedirs(os.path.join(self.data_path, "custom-graph-scripts")) self.manager.config.script_users = "ALL" self.graph_manager = CustomGraphPlugin( - create_time=list(range(1500, 0, -300)).pop) + create_time=list(range(1500, 0, -300)).pop, + ) self.manager.add(self.graph_manager) def _exit_process_protocol(self, protocol, stdout): @@ -43,18 +43,29 @@ info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( - {"type": "custom-graph-add", - "interpreter": "/bin/sh", - "code": "echo hi!", - "username": username, - "graph-id": 123}) + { + "type": "custom-graph-add", + "interpreter": "/bin/sh", + "code": "echo hi!", + "username": username, + "graph-id": 123, + }, + ) self.assertEqual( self.store.get_graphs(), - [(123, - os.path.join(self.data_path, "custom-graph-scripts", - "graph-123"), - username)]) + [ + ( + 123, + os.path.join( + self.data_path, + "custom-graph-scripts", + "graph-123", + ), + username, + ), + ], + ) @mock.patch("pwd.getpwnam") def test_add_graph_unknown_user(self, mock_getpwnam): @@ -69,14 +80,17 @@ 
self.logger.setLevel(logging.ERROR) self.manager.dispatch_message( - {"type": "custom-graph-add", - "interpreter": "/bin/sh", - "code": "echo hi!", - "username": "foo", - "graph-id": 123}) + { + "type": "custom-graph-add", + "interpreter": "/bin/sh", + "code": "echo hi!", + "username": "foo", + "graph-id": 123, + }, + ) graph = self.store.get_graph(123) self.assertEqual(graph[0], 123) - self.assertEqual(graph[2], u"foo") + self.assertEqual(graph[2], "foo") self.assertTrue(error_message in self.logfile.getvalue()) mock_getpwnam.assert_called_with("foo") @@ -84,26 +98,36 @@ @mock.patch("os.chmod") @mock.patch("pwd.getpwnam") def test_add_graph_for_user(self, mock_getpwnam, mock_chmod, mock_chown): - - class pwnam(object): + class PwNam: pw_uid = 1234 pw_gid = 5678 pw_dir = self.makeFile() - mock_getpwnam.return_value = pwnam + mock_getpwnam.return_value = PwNam self.manager.dispatch_message( - {"type": "custom-graph-add", - "interpreter": "/bin/sh", - "code": "echo hi!", - "username": "bar", - "graph-id": 123}) + { + "type": "custom-graph-add", + "interpreter": "/bin/sh", + "code": "echo hi!", + "username": "bar", + "graph-id": 123, + }, + ) self.assertEqual( self.store.get_graphs(), - [(123, - os.path.join(self.data_path, "custom-graph-scripts", - "graph-123"), - "bar")]) + [ + ( + 123, + os.path.join( + self.data_path, + "custom-graph-scripts", + "graph-123", + ), + "bar", + ), + ], + ) mock_chown.assert_called_with(mock.ANY, 1234, 5678) mock_chmod.assert_called_with(mock.ANY, 0o700) @@ -111,15 +135,15 @@ def test_remove_unknown_graph(self): self.manager.dispatch_message( - {"type": "custom-graph-remove", - "graph-id": 123}) + {"type": "custom-graph-remove", "graph-id": 123}, + ) def test_remove_graph(self): - filename = self.makeFile(content='foo') - self.store.add_graph(123, filename, u"user") + filename = self.makeFile(content="foo") + self.store.add_graph(123, filename, "user") self.manager.dispatch_message( - {"type": "custom-graph-remove", - "graph-id": 123}) + {"type": "custom-graph-remove", "graph-id": 123}, + ) self.assertFalse(os.path.exists(filename)) def test_run(self): @@ -132,10 +156,20 @@ script_hash = b"483f2304b49063680c75e3c9e09cf6d0" self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": {123: {"error": u"", - "values": [(300, 1.0)], - "script-hash": script_hash}}, - "type": "custom-graph"}]) + [ + { + "data": { + 123: { + "error": "", + "values": [(300, 1.0)], + "script-hash": script_hash, + }, + }, + "type": "custom-graph", + }, + ], + ) + return self.graph_manager.run().addCallback(check) def test_run_multiple(self): @@ -151,17 +185,29 @@ self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": { - 123: {"error": u"", - "values": [(300, 1.0)], - "script-hash": b"483f2304b49063680c75e3c9e09cf6d0", - }, - 124: {"error": u"", - "values": [(300, 2.0)], - "script-hash": b"73a74b1530b2256db7edacb9b9cc385e", + [ + { + "data": { + 123: { + "error": "", + "values": [(300, 1.0)], + "script-hash": ( + b"483f2304b49063680c75e3c9e09cf6d0" + ), }, - }, - "type": "custom-graph"}]) + 124: { + "error": "", + "values": [(300, 2.0)], + "script-hash": ( + b"73a74b1530b2256db7edacb9b9cc385e" + ), + }, + }, + "type": "custom-graph", + }, + ], + ) + return self.graph_manager.run().addCallback(check) def test_run_with_nonzero_exit_code(self): @@ -173,14 +219,22 @@ self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": { - 123: { 
- "error": u" (process exited with code 1)", - "values": [], - "script-hash": b"eaca3ba1a3bf1948876eba320148c5e9", - } + [ + { + "data": { + 123: { + "error": " (process exited with code 1)", + "values": [], + "script-hash": ( + b"eaca3ba1a3bf1948876eba320148c5e9" + ), + }, + }, + "type": "custom-graph", }, - "type": "custom-graph"}]) + ], + ) + return self.graph_manager.run().addCallback(check) def test_run_cast_result_error(self): @@ -200,15 +254,25 @@ self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": { - 123: { - "error": (u"InvalidFormatError: Failed to convert to " - u"number: 'foobar'"), - "values": [], - "script-hash": b"baab6c16d9143523b7865d46896e4596", + [ + { + "data": { + 123: { + "error": ( + "InvalidFormatError: Failed to convert to " + "number: 'foobar'" + ), + "values": [], + "script-hash": ( + b"baab6c16d9143523b7865d46896e4596" + ), + }, }, + "type": "custom-graph", }, - "type": "custom-graph"}]) + ], + ) + return result.addCallback(check) def test_run_no_output_error(self): @@ -228,15 +292,25 @@ self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": { - 123: { - "error": (u"NoOutputError: Script did not output " - u"any value"), - "values": [], - "script-hash": b"baab6c16d9143523b7865d46896e4596", + [ + { + "data": { + 123: { + "error": ( + "NoOutputError: Script did not output " + "any value" + ), + "values": [], + "script-hash": ( + b"baab6c16d9143523b7865d46896e4596" + ), + }, }, + "type": "custom-graph", }, - "type": "custom-graph"}]) + ], + ) + return result.addCallback(check) def test_run_no_output_error_with_other_result(self): @@ -258,18 +332,32 @@ self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": { - 123: { - "error": (u"NoOutputError: Script did not output " - u"any value"), - "script-hash": b"baab6c16d9143523b7865d46896e4596", - "values": []}, - 124: { - "error": u"", - "script-hash": b"baab6c16d9143523b7865d46896e4596", - "values": [(300, 0.5)]}, - }, - "type": "custom-graph"}]) + [ + { + "data": { + 123: { + "error": ( + "NoOutputError: Script did not output " + "any value" + ), + "script-hash": ( + b"baab6c16d9143523b7865d46896e4596" + ), + "values": [], + }, + 124: { + "error": "", + "script-hash": ( + b"baab6c16d9143523b7865d46896e4596" + ), + "values": [(300, 0.5)], + }, + }, + "type": "custom-graph", + }, + ], + ) + return result.addCallback(check) def test_multiple_errors(self): @@ -291,19 +379,35 @@ self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": { - 123: { - "error": (u"InvalidFormatError: Failed to convert " - u"to number: 'foo'"), - "script-hash": b"baab6c16d9143523b7865d46896e4596", - "values": []}, - 124: { - "error": (u"NoOutputError: Script did not output " - u"any value"), - "script-hash": b"baab6c16d9143523b7865d46896e4596", - "values": []}, - }, - "type": "custom-graph"}]) + [ + { + "data": { + 123: { + "error": ( + "InvalidFormatError: Failed to convert " + "to number: 'foo'" + ), + "script-hash": ( + b"baab6c16d9143523b7865d46896e4596" + ), + "values": [], + }, + 124: { + "error": ( + "NoOutputError: Script did not output " + "any value" + ), + "script-hash": ( + b"baab6c16d9143523b7865d46896e4596" + ), + "values": [], + }, + }, + "type": "custom-graph", + }, + ], + ) + return result.addCallback(check) @mock.patch("pwd.getpwnam") @@ -313,12 +417,12 @@ factory = 
StubProcessFactory() self.graph_manager.process_factory = factory - class pwnam(object): + class PwNam: pw_uid = 1234 pw_gid = 5678 pw_dir = self.makeFile() - mock_getpwnam.return_value = pwnam + mock_getpwnam.return_value = PwNam result = self.graph_manager.run() @@ -353,14 +457,22 @@ self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": { - 123: { - "error": (u"ProhibitedUserError: Custom graph cannot " - u"be run as user %s") % (username,), - "script-hash": b"", - "values": []}, + [ + { + "data": { + 123: { + "error": ( + "ProhibitedUserError: Custom graph cannot " + f"be run as user {username}" + ), + "script-hash": b"", + "values": [], + }, + }, + "type": "custom-graph", }, - "type": "custom-graph"}]) + ], + ) return result.addCallback(check) @@ -381,13 +493,21 @@ self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": { - 123: { - "error": u"UnknownUserError: Unknown user 'foo'", - "script-hash": b"", - "values": []}, + [ + { + "data": { + 123: { + "error": ( + "UnknownUserError: Unknown user 'foo'" + ), + "script-hash": b"", + "values": [], + }, + }, + "type": "custom-graph", }, - "type": "custom-graph"}]) + ], + ) mock_getpwnam.assert_called_with("foo") return result.addCallback(check) @@ -412,13 +532,23 @@ self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": { - 123: { - "error": u"Process exceeded the 10 seconds limit", - "script-hash": b"9893532233caff98cd083a116b013c0b", - "values": []}, + [ + { + "data": { + 123: { + "error": ( + "Process exceeded the 10 seconds limit" + ), + "script-hash": ( + b"9893532233caff98cd083a116b013c0b" + ), + "values": [], + }, + }, + "type": "custom-graph", }, - "type": "custom-graph"}]) + ], + ) return result.addCallback(check) @@ -437,8 +567,15 @@ self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": {123: {"error": u"", "script-hash": b"", "values": []}}, - "type": "custom-graph"}]) + [ + { + "data": { + 123: {"error": "", "script-hash": b"", "values": []}, + }, + "type": "custom-graph", + }, + ], + ) def test_send_message_add_stored_graph(self): """ @@ -449,21 +586,32 @@ info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( - {"type": "custom-graph-add", - "interpreter": "/bin/sh", - "code": "echo hi!", - "username": username, - "graph-id": 123}) + { + "type": "custom-graph-add", + "interpreter": "/bin/sh", + "code": "echo hi!", + "username": username, + "graph-id": 123, + }, + ) self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"api": b"3.2", - "data": {123: {"error": u"", - "script-hash": - b"e00a2f44dbc7b6710ce32af2348aec9b", - "values": []}}, - "timestamp": 0, - "type": "custom-graph"}]) + [ + { + "api": b"3.2", + "data": { + 123: { + "error": "", + "script-hash": b"e00a2f44dbc7b6710ce32af2348aec9b", + "values": [], + }, + }, + "timestamp": 0, + "type": "custom-graph", + }, + ], + ) def test_send_message_check_not_present_graph(self): """C{send_message} checks the presence of the custom-graph script.""" @@ -471,20 +619,28 @@ info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( - {"type": "custom-graph-add", - "interpreter": "/bin/sh", - "code": "echo hi!", - "username": username, - "graph-id": 123}) + { + "type": "custom-graph-add", + "interpreter": "/bin/sh", + 
"code": "echo hi!", + "username": username, + "graph-id": 123, + }, + ) filename = self.store.get_graph(123)[1] os.unlink(filename) self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"api": b"3.2", - "data": {}, - "timestamp": 0, - "type": "custom-graph"}]) + [ + { + "api": b"3.2", + "data": {}, + "timestamp": 0, + "type": "custom-graph", + }, + ], + ) def test_send_message_dont_rehash(self): """ @@ -495,67 +651,102 @@ info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( - {"type": "custom-graph-add", - "interpreter": "/bin/sh", - "code": "echo hi!", - "username": username, - "graph-id": 123}) + { + "type": "custom-graph-add", + "interpreter": "/bin/sh", + "code": "echo hi!", + "username": username, + "graph-id": 123, + }, + ) self.graph_manager.exchange() self.graph_manager._get_script_hash = lambda x: 1 / 0 self.graph_manager.do_send = True self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"api": b"3.2", - "data": {123: {"error": u"", - "script-hash": - b"e00a2f44dbc7b6710ce32af2348aec9b", - "values": []}}, - "timestamp": 0, - "type": "custom-graph"}, - {"api": b"3.2", - "data": {123: {"error": u"", - "script-hash": - b"e00a2f44dbc7b6710ce32af2348aec9b", - "values": []}}, - "timestamp": 0, - "type": "custom-graph"}]) + [ + { + "api": b"3.2", + "data": { + 123: { + "error": "", + "script-hash": b"e00a2f44dbc7b6710ce32af2348aec9b", + "values": [], + }, + }, + "timestamp": 0, + "type": "custom-graph", + }, + { + "api": b"3.2", + "data": { + 123: { + "error": "", + "script-hash": b"e00a2f44dbc7b6710ce32af2348aec9b", + "values": [], + }, + }, + "timestamp": 0, + "type": "custom-graph", + }, + ], + ) def test_send_message_rehash_if_necessary(self): uid = os.getuid() info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( - {"type": "custom-graph-add", - "interpreter": "/bin/sh", - "code": "echo hi!", - "username": username, - "graph-id": 123}) + { + "type": "custom-graph-add", + "interpreter": "/bin/sh", + "code": "echo hi!", + "username": username, + "graph-id": 123, + }, + ) self.graph_manager.exchange() self.manager.dispatch_message( - {"type": "custom-graph-add", - "interpreter": "/bin/sh", - "code": "echo bye!", - "username": username, - "graph-id": 123}) + { + "type": "custom-graph-add", + "interpreter": "/bin/sh", + "code": "echo bye!", + "username": username, + "graph-id": 123, + }, + ) self.graph_manager.do_send = True self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"api": b"3.2", - "data": {123: {"error": u"", - "script-hash": - b"e00a2f44dbc7b6710ce32af2348aec9b", - "values": []}}, - "timestamp": 0, - "type": "custom-graph"}, - {"api": b"3.2", - "data": {123: {"error": u"", - "script-hash": - b"d483816dc0fbb51ede42502a709b0e2a", - "values": []}}, - "timestamp": 0, - "type": "custom-graph"}]) + [ + { + "api": b"3.2", + "data": { + 123: { + "error": "", + "script-hash": b"e00a2f44dbc7b6710ce32af2348aec9b", + "values": [], + }, + }, + "timestamp": 0, + "type": "custom-graph", + }, + { + "api": b"3.2", + "data": { + 123: { + "error": "", + "script-hash": b"d483816dc0fbb51ede42502a709b0e2a", + "values": [], + }, + }, + "timestamp": 0, + "type": "custom-graph", + }, + ], + ) def test_run_with_script_updated(self): """ @@ -567,11 +758,14 @@ info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( - {"type": "custom-graph-add", - 
"interpreter": "/bin/sh", - "code": "echo 1.0", - "username": username, - "graph-id": 123}) + { + "type": "custom-graph-add", + "interpreter": "/bin/sh", + "code": "echo 1.0", + "username": username, + "graph-id": 123, + }, + ) factory = StubProcessFactory() self.graph_manager.process_factory = factory @@ -581,11 +775,14 @@ spawn = factory.spawns[0] self.manager.dispatch_message( - {"type": "custom-graph-add", - "interpreter": "/bin/sh", - "code": "echo 2.0", - "username": username, - "graph-id": 123}) + { + "type": "custom-graph-add", + "interpreter": "/bin/sh", + "code": "echo 2.0", + "username": username, + "graph-id": 123, + }, + ) self._exit_process_protocol(spawn[0], b"1.0") @@ -593,13 +790,23 @@ self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"api": b"3.2", - "data": {123: {"error": u"", - "script-hash": - b"991e15a81929c79fe1d243b2afd99c62", - "values": []}}, - "timestamp": 0, - "type": "custom-graph"}]) + [ + { + "api": b"3.2", + "data": { + 123: { + "error": "", + "script-hash": ( + b"991e15a81929c79fe1d243b2afd99c62" + ), + "values": [], + }, + }, + "timestamp": 0, + "type": "custom-graph", + }, + ], + ) return result.addCallback(check) @@ -612,11 +819,14 @@ info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( - {"type": "custom-graph-add", - "interpreter": "/bin/sh", - "code": "echo 1.0", - "username": username, - "graph-id": 123}) + { + "type": "custom-graph-add", + "interpreter": "/bin/sh", + "code": "echo 1.0", + "username": username, + "graph-id": 123, + }, + ) factory = StubProcessFactory() self.graph_manager.process_factory = factory @@ -626,8 +836,8 @@ spawn = factory.spawns[0] self.manager.dispatch_message( - {"type": "custom-graph-remove", - "graph-id": 123}) + {"type": "custom-graph-remove", "graph-id": 123}, + ) self._exit_process_protocol(spawn[0], b"1.0") @@ -635,8 +845,16 @@ self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"api": b"3.2", "data": {}, "timestamp": 0, "type": - "custom-graph"}]) + [ + { + "api": b"3.2", + "data": {}, + "timestamp": 0, + "type": "custom-graph", + }, + ], + ) + return result.addCallback(check) def test_run_not_accepted_types(self): @@ -650,11 +868,14 @@ info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( - {"type": "custom-graph-add", - "interpreter": "/bin/sh", - "code": "echo 1.0", - "username": username, - "graph-id": 123}) + { + "type": "custom-graph-add", + "interpreter": "/bin/sh", + "code": "echo 1.0", + "username": username, + "graph-id": 123, + }, + ) factory = StubProcessFactory() self.graph_manager.process_factory = factory @@ -671,7 +892,8 @@ of results. """ self.graph_manager.registry.broker.call_if_accepted = ( - lambda *args: 1 / 0) + lambda *args: 1 / 0 + ) factory = StubProcessFactory() self.graph_manager.process_factory = factory result = self.graph_manager.run() @@ -685,7 +907,7 @@ Using a non-existent user containing unicode characters fails with the appropriate error message. 
""" - username = u"non-existent-f\N{LATIN SMALL LETTER E WITH ACUTE}e" + username = "non-existent-f\N{LATIN SMALL LETTER E WITH ACUTE}e" self.manager.config.script_users = "ALL" @@ -701,12 +923,21 @@ self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": { - 123: { - "error": - u"UnknownUserError: Unknown user '%s'" % username, - "script-hash": b"9893532233caff98cd083a116b013c0b", - "values": []}}, - "type": "custom-graph"}]) + [ + { + "data": { + 123: { + "error": "UnknownUserError: " + f"Unknown user '{username}'", + "script-hash": ( + b"9893532233caff98cd083a116b013c0b" + ), + "values": [], + }, + }, + "type": "custom-graph", + }, + ], + ) return result.addCallback(check) diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_fakepackagemanager.py landscape-client-23.08/landscape/client/manager/tests/test_fakepackagemanager.py --- landscape-client-23.02/landscape/client/manager/tests/test_fakepackagemanager.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_fakepackagemanager.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,7 +1,7 @@ -from landscape.client.manager.plugin import SUCCEEDED - from landscape.client.manager.fakepackagemanager import FakePackageManager -from landscape.client.tests.helpers import LandscapeTest, ManagerHelper +from landscape.client.manager.plugin import SUCCEEDED +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import ManagerHelper class FakePackageManagerTest(LandscapeTest): @@ -10,7 +10,7 @@ helpers = [ManagerHelper] def setUp(self): - super(FakePackageManagerTest, self).setUp() + super().setUp() self.package_manager = FakePackageManager() self.package_manager.randint = lambda x, y: 0 @@ -26,10 +26,17 @@ self.manager.dispatch_message(message) self.manager.reactor.advance(1) - self.assertMessages(service.message_store.get_pending_messages(), - [{"type": "change-packages-result", - "result-text": "OK done.", - "result-code": 1, "operation-id": 1}]) + self.assertMessages( + service.message_store.get_pending_messages(), + [ + { + "type": "change-packages-result", + "result-text": "OK done.", + "result-code": 1, + "operation-id": 1, + }, + ], + ) def test_handle_change_package_locks(self): """ @@ -43,12 +50,18 @@ self.manager.dispatch_message(message) self.manager.reactor.advance(1) - self.assertMessages(service.message_store.get_pending_messages(), - [{"type": "operation-result", - "result-text": - "Package locks successfully changed.", - "result-code": 0, "status": SUCCEEDED, - "operation-id": 1}]) + self.assertMessages( + service.message_store.get_pending_messages(), + [ + { + "type": "operation-result", + "result-text": "Package locks successfully changed.", + "result-code": 0, + "status": SUCCEEDED, + "operation-id": 1, + }, + ], + ) def test_handle_release_upgrade(self): """ @@ -62,9 +75,15 @@ self.manager.dispatch_message(message) self.manager.reactor.advance(1) - self.assertMessages(service.message_store.get_pending_messages(), - [{"type": "operation-result", - "result-text": - "Successful release upgrade.", - "result-code": 0, "status": SUCCEEDED, - "operation-id": 1}]) + self.assertMessages( + service.message_store.get_pending_messages(), + [ + { + "type": "operation-result", + "result-text": "Successful release upgrade.", + "result-code": 0, + "status": SUCCEEDED, + "operation-id": 1, + }, + ], + ) diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_hardwareinfo.py 
landscape-client-23.08/landscape/client/manager/tests/test_hardwareinfo.py --- landscape-client-23.02/landscape/client/manager/tests/test_hardwareinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_hardwareinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,13 +1,13 @@ -from landscape.client.tests.helpers import LandscapeTest, ManagerHelper - from landscape.client.manager.hardwareinfo import HardwareInfo +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import ManagerHelper class HardwareInfoTests(LandscapeTest): helpers = [ManagerHelper] def setUp(self): - super(HardwareInfoTests, self).setUp() + super().setUp() self.info = HardwareInfo() self.info.command = "/bin/echo" self.manager.add(self.info) @@ -24,7 +24,8 @@ def check(ignored): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": u"-xml -quiet\n", "type": "hardware-info"}]) + [{"data": "-xml -quiet\n", "type": "hardware-info"}], + ) return deferred.addCallback(check) @@ -40,7 +41,8 @@ def check(ignored): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": u"-xml -quiet\n", "type": "hardware-info"}]) + [{"data": "-xml -quiet\n", "type": "hardware-info"}], + ) return deferred.addCallback(check) @@ -57,7 +59,8 @@ def check(ignored): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"data": u"-xml -quiet\n", "type": "hardware-info"}]) + [{"data": "-xml -quiet\n", "type": "hardware-info"}], + ) self.assertEqual([], calls) return deferred.addCallback(check) diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_keystonetoken.py landscape-client-23.08/landscape/client/manager/tests/test_keystonetoken.py --- landscape-client-23.02/landscape/client/manager/tests/test_keystonetoken.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_keystonetoken.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,9 @@ import os -from landscape.client.tests.helpers import LandscapeTest from landscape.client.manager.keystonetoken import KeystoneToken -from landscape.client.tests.helpers import ManagerHelper, FakePersist +from landscape.client.tests.helpers import FakePersist +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import ManagerHelper class KeystoneTokenTest(LandscapeTest): @@ -10,7 +11,7 @@ helpers = [ManagerHelper] def setUp(self): - super(KeystoneTokenTest, self).setUp() + super().setUp() self.keystone_file = os.path.join(self.makeDir(), "keystone.conf") self.plugin = KeystoneToken(self.keystone_file) @@ -45,7 +46,8 @@ """ self.makeFile( path=self.keystone_file, - content="[DEFAULT]\nadmin_token = foobar") + content="[DEFAULT]\nadmin_token = foobar", + ) # As we allow arbitrary bytes, we also need bytes here. self.assertEqual(b"foobar", self.plugin.get_data()) @@ -54,10 +56,7 @@ The data can be arbitrary bytes. 
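The admin token read from keystone.conf may be arbitrary bytes, so the value cannot safely round-trip through a text-mode INI parser. A byte-oriented sketch of the extraction, written only to reproduce the behaviour these tests assert (bytes out, None for an empty token); KeystoneToken's actual parsing is not shown in this diff:

```python
def read_admin_token(path):
    """Return the admin_token value from a keystone.conf-style file."""
    with open(path, "rb") as config:
        for line in config:
            key, _, value = line.partition(b"=")
            if key.strip() == b"admin_token":
                # b"foobar" and b"\xff" come back unchanged; an empty
                # value ("admin_token =") is reported as None.
                return value.strip() or None
    return None
```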
""" content = b"[DEFAULT]\nadmin_token = \xff" - self.makeFile( - path=self.keystone_file, - content=content, - mode="wb") + self.makeFile(path=self.keystone_file, content=content, mode="wb") self.assertEqual(b"\xff", self.plugin.get_data()) def test_get_message(self): @@ -67,12 +66,14 @@ """ self.makeFile( path=self.keystone_file, - content="[DEFAULT]\nadmin_token = foobar") + content="[DEFAULT]\nadmin_token = foobar", + ) self.plugin.register(self.manager) message = self.plugin.get_message() self.assertEqual( - {'type': 'keystone-token', 'data': b'foobar'}, - message) + {"type": "keystone-token", "data": b"foobar"}, + message, + ) message = self.plugin.get_message() self.assertIs(None, message) @@ -82,8 +83,10 @@ creates the perists file. """ flush_interval = self.config.flush_interval - persist_filename = os.path.join(self.config.data_path, - "keystone.bpickle") + persist_filename = os.path.join( + self.config.data_path, + "keystone.bpickle", + ) self.assertFalse(os.path.exists(persist_filename)) self.manager.add(self.plugin) @@ -139,8 +142,10 @@ If the plugin could not extract the C{admin_token} from the Keystone config file, upon exchange, C{None} is returned. """ - self.makeFile(path=self.keystone_file, - content="[DEFAULT]\nadmin_token =") + self.makeFile( + path=self.keystone_file, + content="[DEFAULT]\nadmin_token =", + ) self.manager.add(self.plugin) def check(result): diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_manager.py landscape-client-23.08/landscape/client/manager/tests/test_manager.py --- landscape-client-23.02/landscape/client/manager/tests/test_manager.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_manager.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,6 +1,6 @@ from landscape.client.manager.store import ManagerStore - -from landscape.client.tests.helpers import LandscapeTest, ManagerHelper +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import ManagerHelper class ManagerTest(LandscapeTest): diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_packagemanager.py landscape-client-23.08/landscape/client/manager/tests/test_packagemanager.py --- landscape-client-23.02/landscape/client/manager/tests/test_packagemanager.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_packagemanager.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,16 +1,15 @@ -import mock -import os import os.path +from unittest import mock from twisted.internet.defer import Deferred +from landscape.client.manager.packagemanager import PackageManager from landscape.client.package.changer import PackageChanger from landscape.client.package.releaseupgrader import ReleaseUpgrader +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import ManagerHelper from landscape.lib.apt.package.store import PackageStore - from landscape.lib.testing import EnvironSaverHelper -from landscape.client.manager.packagemanager import PackageManager -from landscape.client.tests.helpers import LandscapeTest, ManagerHelper class PackageManagerTest(LandscapeTest): @@ -20,10 +19,11 @@ def setUp(self): """Initialize test helpers and create a sample package store.""" - super(PackageManagerTest, self).setUp() + super().setUp() self.config = self.broker_service.config - self.package_store = PackageStore(os.path.join(self.data_path, - "package/database")) + self.package_store = PackageStore( + 
os.path.join(self.data_path, "package/database"), + ) self.package_manager = PackageManager() def test_create_default_store_upon_message_handling(self): @@ -49,8 +49,10 @@ self.manager.add(self.package_manager) with mock.patch.object(self.package_manager, "spawn_handler"): self.package_manager.run() - self.assertNotIn(PackageChanger, - self.package_manager.spawn_handler.call_args_list) + self.assertNotIn( + PackageChanger, + self.package_manager.spawn_handler.call_args_list, + ) self.assertEqual(0, self.package_manager.spawn_handler.call_count) def test_dont_spawn_release_upgrader_if_message_not_accepted(self): @@ -61,8 +63,10 @@ self.manager.add(self.package_manager) with mock.patch.object(self.package_manager, "spawn_handler"): self.package_manager.run() - self.assertNotIn(ReleaseUpgrader, - self.package_manager.spawn_handler.call_args_list) + self.assertNotIn( + ReleaseUpgrader, + self.package_manager.spawn_handler.call_args_list, + ) self.assertEqual(0, self.package_manager.spawn_handler.call_count) def test_spawn_handler_on_registration_when_already_accepted(self): @@ -79,15 +83,20 @@ return run_result_deferred.chainDeferred(deferred) with mock.patch.object(self.package_manager, "spawn_handler"): - with mock.patch.object(self.package_manager, "run", - side_effect=run_has_run): + with mock.patch.object( + self.package_manager, + "run", + side_effect=run_has_run, + ): service = self.broker_service service.message_store.set_accepted_types( - ["change-packages-result"]) + ["change-packages-result"], + ) self.manager.add(self.package_manager) self.successResultOf(deferred) self.package_manager.spawn_handler.assert_called_once_with( - PackageChanger) + PackageChanger, + ) self.package_manager.run.assert_called_once_with() def test_spawn_changer_on_run_if_message_accepted(self): @@ -102,7 +111,8 @@ self.manager.add(self.package_manager) self.package_manager.run() self.package_manager.spawn_handler.assert_called_with( - PackageChanger) + PackageChanger, + ) # Method is called once for registration, then again explicitly. self.assertEquals(2, self.package_manager.spawn_handler.call_count) @@ -119,7 +129,8 @@ self.manager.add(self.package_manager) self.manager.reactor.fire("package-data-changed")[0] self.package_manager.spawn_handler.assert_called_with( - PackageChanger) + PackageChanger, + ) # Method is called once for registration, then again explicitly. self.assertEquals(2, self.package_manager.spawn_handler.call_count) @@ -135,7 +146,8 @@ self.manager.add(self.package_manager) self.package_manager.run() self.package_manager.spawn_handler.assert_called_with( - ReleaseUpgrader) + ReleaseUpgrader, + ) # Method is called once for registration, then again explicitly. 
self.assertEquals(2, self.package_manager.spawn_handler.call_count) @@ -149,7 +161,8 @@ self.assertTrue(task) self.assertEqual(task.data, message) self.package_manager.spawn_handler.assert_called_once_with( - PackageChanger) + PackageChanger, + ) def test_change_packages_handling_with_reboot(self): self.manager.add(self.package_manager) @@ -161,7 +174,8 @@ self.assertTrue(task) self.assertEqual(task.data, message) self.package_manager.spawn_handler.assert_called_once_with( - PackageChanger) + PackageChanger, + ) def test_release_upgrade_handling(self): """ @@ -178,7 +192,8 @@ self.assertTrue(task) self.assertEqual(task.data, message) self.package_manager.spawn_handler.assert_called_once_with( - ReleaseUpgrader) + ReleaseUpgrader, + ) def test_spawn_changer(self): """ @@ -188,7 +203,8 @@ command = self.write_script( self.config, "landscape-package-changer", - "#!/bin/sh\necho 'I am the changer!' >&2\n") + "#!/bin/sh\necho 'I am the changer!' >&2\n", + ) self.manager.config = self.config self.package_store.add_task("changer", "Do something!") @@ -211,7 +227,8 @@ command = self.write_script( self.config, "landscape-release-upgrader", - "#!/bin/sh\necho 'I am the upgrader!' >&2\n") + "#!/bin/sh\necho 'I am the upgrader!' >&2\n", + ) self.manager.config = self.config self.package_store.add_task("release-upgrader", "Do something!") @@ -229,7 +246,8 @@ self.write_script( self.config, "landscape-package-changer", - "#!/bin/sh\n/bin/true") + "#!/bin/sh\n/bin/true", + ) self.manager.config = self.config self.package_store.add_task("changer", "Do something!") @@ -247,7 +265,8 @@ command = self.write_script( self.config, "landscape-package-changer", - "#!/bin/sh\necho VAR: $VAR\n") + "#!/bin/sh\necho VAR: $VAR\n", + ) self.manager.config = self.config self.manager.add(self.package_manager) @@ -267,7 +286,8 @@ command = self.write_script( self.config, "landscape-package-changer", - "#!/bin/sh\necho OPTIONS: $@\n") + "#!/bin/sh\necho OPTIONS: $@\n", + ) self.manager.config = self.config self.manager.add(self.package_manager) @@ -298,7 +318,8 @@ self.write_script( self.config, "landscape-package-changer", - "#!/bin/sh\necho RUN\n") + "#!/bin/sh\necho RUN\n", + ) self.manager.config = self.config cwd = os.getcwd() @@ -334,4 +355,5 @@ self.assertTrue(task) self.assertEqual(task.data, message) self.package_manager.spawn_handler.assert_called_once_with( - PackageChanger) + PackageChanger, + ) diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_plugin.py landscape-client-23.08/landscape/client/manager/tests/test_plugin.py --- landscape-client-23.02/landscape/client/manager/tests/test_plugin.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_plugin.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,10 @@ from twisted.internet.defer import Deferred +from landscape.client.manager.plugin import FAILED +from landscape.client.manager.plugin import ManagerPlugin +from landscape.client.manager.plugin import SUCCEEDED from landscape.client.tests.helpers import LandscapeTest from landscape.client.tests.helpers import ManagerHelper -from landscape.client.manager.plugin import ManagerPlugin, SUCCEEDED, FAILED class BrokerPluginTest(LandscapeTest): @@ -19,14 +21,22 @@ broker_service = self.broker_service broker_service.message_store.set_accepted_types(["operation-result"]) message = {"operation-id": 12312} - operation = (lambda: None) + + def operation(): + return None def assert_messages(ignored): messages = 
broker_service.message_store.get_pending_messages() - self.assertMessages(messages, - [{"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 12312}]) + self.assertMessages( + messages, + [ + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 12312, + }, + ], + ) result = plugin.call_with_operation_result(message, operation) return result.addCallback(assert_messages) @@ -48,10 +58,17 @@ def assert_messages(ignored): messages = broker_service.message_store.get_pending_messages() - self.assertMessages(messages, - [{"type": "operation-result", "status": FAILED, - "result-text": "RuntimeError: What the " - "crap!", "operation-id": 12312}]) + self.assertMessages( + messages, + [ + { + "type": "operation-result", + "status": FAILED, + "result-text": "RuntimeError: What the crap!", + "operation-id": 12312, + }, + ], + ) logdata = self.logfile.getvalue() self.assertTrue("RuntimeError: What the crap!" in logdata, logdata) @@ -67,7 +84,9 @@ broker_service = self.broker_service broker_service.message_store.set_accepted_types(["operation-result"]) message = {"operation-id": 123} - operation = (lambda: None) + + def operation(): + return None def assert_urgency(ignored): self.assertTrue(broker_service.exchanger.is_urgent()) @@ -85,15 +104,23 @@ broker_service.message_store.set_accepted_types(["operation-result"]) message = {"operation-id": 12312} deferred = Deferred() - operation = (lambda: deferred) + + def operation(): + return deferred def assert_messages(ignored): messages = broker_service.message_store.get_pending_messages() - self.assertMessages(messages, - [{"type": "operation-result", - "result-text": "blah", - "status": SUCCEEDED, - "operation-id": 12312}]) + self.assertMessages( + messages, + [ + { + "type": "operation-result", + "result-text": "blah", + "status": SUCCEEDED, + "operation-id": 12312, + }, + ], + ) result = plugin.call_with_operation_result(message, operation) result.addCallback(assert_messages) diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_processkiller.py landscape-client-23.08/landscape/client/manager/tests/test_processkiller.py --- landscape-client-23.02/landscape/client/manager/tests/test_processkiller.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_processkiller.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,23 +1,27 @@ -from datetime import datetime -from mock import patch import signal import subprocess +from datetime import datetime +from unittest.mock import patch +from landscape.client.manager.plugin import FAILED +from landscape.client.manager.plugin import SUCCEEDED +from landscape.client.manager.processkiller import ProcessKiller +from landscape.client.manager.processkiller import ProcessMismatchError +from landscape.client.manager.processkiller import ProcessNotFoundError +from landscape.client.manager.processkiller import SignalProcessError +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import ManagerHelper from landscape.lib.process import ProcessInformation from landscape.lib.testing import ProcessDataBuilder -from landscape.client.tests.helpers import LandscapeTest, ManagerHelper - -from landscape.client.manager.plugin import SUCCEEDED, FAILED -from landscape.client.manager.processkiller import ( - ProcessKiller, ProcessNotFoundError, ProcessMismatchError, - SignalProcessError) def get_active_process(): - return subprocess.Popen(["python3", "-c", "input()"], - stdin=subprocess.PIPE, - 
stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + return subprocess.Popen( + ["python3", "-c", "input()"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) def get_missing_pid(): @@ -35,8 +39,11 @@ LandscapeTest.setUp(self) self.sample_dir = self.makeDir() self.builder = ProcessDataBuilder(self.sample_dir) - self.process_info = ProcessInformation(proc_dir=self.sample_dir, - jiffies=1, boot_time=10) + self.process_info = ProcessInformation( + proc_dir=self.sample_dir, + jiffies=1, + boot_time=10, + ) self.signaller = ProcessKiller(process_info=self.process_info) service = self.broker_service service.message_store.set_accepted_types(["operation-result"]) @@ -44,15 +51,25 @@ @patch("os.kill") def _test_signal_name(self, signame, signum, kill_mock): self.manager.add(self.signaller) - self.builder.create_data(100, self.builder.RUNNING, - uid=1000, gid=1000, started_after_boot=10, - process_name="ooga") + self.builder.create_data( + 100, + self.builder.RUNNING, + uid=1000, + gid=1000, + started_after_boot=10, + process_name="ooga", + ) self.manager.dispatch_message( - {"type": "signal-process", - "operation-id": 1, - "pid": 100, "name": "ooga", - "start-time": 20, "signal": signame}) + { + "type": "signal-process", + "operation-id": 1, + "pid": 100, + "name": "ooga", + "start-time": 20, + "signal": signame, + }, + ) kill_mock.assert_called_once_with(100, signum) def test_kill_process_signal(self): @@ -86,10 +103,15 @@ start_time = process_info["start-time"] self.manager.dispatch_message( - {"type": "signal-process", - "operation-id": 1, - "pid": popen.pid, "name": "python", - "start-time": start_time, "signal": signame}) + { + "type": "signal-process", + "operation-id": 1, + "pid": popen.pid, + "name": "python", + "start-time": start_time, + "signal": signame, + }, + ) # We're waiting on the child process here so that we (the # parent process) consume its return code; this prevents it # from becoming a zombie and makes the test do a better job of
self.logfile.getvalue()) def test_signal_process_start_time_mismatch(self): @@ -145,29 +188,47 @@ self.log_helper.ignore_errors(ProcessMismatchError) self.manager.add(self.signaller) pid = get_missing_pid() - self.builder.create_data(pid, self.builder.RUNNING, - uid=1000, gid=1000, started_after_boot=10, - process_name="hostname") + self.builder.create_data( + pid, + self.builder.RUNNING, + uid=1000, + gid=1000, + started_after_boot=10, + process_name="hostname", + ) self.manager.dispatch_message( - {"operation-id": 1, "type": "signal-process", - "pid": pid, "name": "python", - "start-time": 11, "signal": "KILL"}) + { + "operation-id": 1, + "type": "signal-process", + "pid": pid, + "name": "python", + "start-time": 11, + "signal": "KILL", + }, + ) expected_time = datetime.utcfromtimestamp(11) # boot time + proc start time = 20 actual_time = datetime.utcfromtimestamp(20) - expected_text = ("ProcessMismatchError: The process python with " - "PID %d that started at %s UTC was not found. A " - "process with the same PID that started at %s UTC " - "was found and not sent the KILL signal" - % (pid, expected_time, actual_time)) + expected_text = ( + "ProcessMismatchError: The process python with " + f"PID {pid:d} that started at {expected_time} UTC was not " + "found. A process with the same PID that started " + f"at {actual_time} UTC was found and not sent the KILL signal" + ) service = self.broker_service - self.assertMessages(service.message_store.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 1, - "status": FAILED, - "result-text": expected_text}]) + self.assertMessages( + service.message_store.get_pending_messages(), + [ + { + "type": "operation-result", + "operation-id": 1, + "status": FAILED, + "result-text": expected_text, + }, + ], + ) self.assertTrue("ProcessMismatchError" in self.logfile.getvalue()) def test_signal_process_race(self): @@ -179,28 +240,51 @@ """ self.log_helper.ignore_errors(SignalProcessError) pid = get_missing_pid() - self.builder.create_data(pid, self.builder.RUNNING, - uid=1000, gid=1000, started_after_boot=10, - process_name="hostname") - self.assertRaises(SignalProcessError, - self.signaller.signal_process, pid, - "hostname", 20, "KILL") + self.builder.create_data( + pid, + self.builder.RUNNING, + uid=1000, + gid=1000, + started_after_boot=10, + process_name="hostname", + ) + self.assertRaises( + SignalProcessError, + self.signaller.signal_process, + pid, + "hostname", + 20, + "KILL", + ) self.manager.add(self.signaller) self.manager.dispatch_message( - {"operation-id": 1, "type": "signal-process", - "pid": pid, "name": "hostname", "start-time": 20, - "signal": "KILL"}) - expected_text = ("SignalProcessError: Attempting to send the KILL " - "signal to the process hostname with PID %d failed" - % (pid,)) + { + "operation-id": 1, + "type": "signal-process", + "pid": pid, + "name": "hostname", + "start-time": 20, + "signal": "KILL", + }, + ) + expected_text = ( + "SignalProcessError: Attempting to send the KILL " + f"signal to the process hostname with PID {pid:d} failed" + ) service = self.broker_service - self.assertMessages(service.message_store.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 1, - "status": FAILED, - "result-text": expected_text}]) + self.assertMessages( + service.message_store.get_pending_messages(), + [ + { + "type": "operation-result", + "operation-id": 1, + "status": FAILED, + "result-text": expected_text, + }, + ], + ) self.assertTrue("SignalProcessError" in self.logfile.getvalue()) 
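The three failure modes above (process not found, start-time mismatch, delivery race) all guard the same os.kill call, which fires only when validation passes. A sketch of the name-to-signal dispatch these tests drive, using a hypothetical deliver_signal helper; ProcessKiller's real code also verifies the process name and start time first:

```python
import os
import signal


def deliver_signal(pid, signame):
    # "KILL" -> signal.SIGKILL, "TERM" -> signal.SIGTERM, matching the
    # kill_mock.assert_called_once_with(100, signal.SIGKILL) assertions.
    signum = getattr(signal, f"SIG{signame}")
    os.kill(pid, signum)
```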
@patch("os.kill") @@ -210,13 +294,23 @@ computed process start time. """ self.manager.add(self.signaller) - self.builder.create_data(100, self.builder.RUNNING, - uid=1000, gid=1000, started_after_boot=10, - process_name="ooga") + self.builder.create_data( + 100, + self.builder.RUNNING, + uid=1000, + gid=1000, + started_after_boot=10, + process_name="ooga", + ) self.manager.dispatch_message( - {"type": "signal-process", - "operation-id": 1, - "pid": 100, "name": "ooga", - "start-time": 21, "signal": "KILL"}) + { + "type": "signal-process", + "operation-id": 1, + "pid": 100, + "name": "ooga", + "start-time": 21, + "signal": "KILL", + }, + ) kill_mock.assert_called_once_with(100, signal.SIGKILL) diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_scriptexecution.py landscape-client-23.08/landscape/client/manager/tests/test_scriptexecution.py --- landscape-client-23.02/landscape/client/manager/tests/test_scriptexecution.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_scriptexecution.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,25 +1,37 @@ -import pwd import os +import pwd +import stat import sys import tempfile -import stat +from unittest import mock -import mock - -from twisted.internet.defer import gatherResults, succeed, fail +from twisted.internet.defer import fail +from twisted.internet.defer import gatherResults +from twisted.internet.defer import succeed from twisted.internet.error import ProcessDone from twisted.python.failure import Failure from landscape import VERSION +from landscape.client.manager.manager import FAILED +from landscape.client.manager.manager import SUCCEEDED +from landscape.client.manager.scriptexecution import ( + FETCH_ATTACHMENTS_FAILED_RESULT, +) +from landscape.client.manager.scriptexecution import PROCESS_FAILED_RESULT +from landscape.client.manager.scriptexecution import ( + ProcessTimeLimitReachedError, +) +from landscape.client.manager.scriptexecution import ScriptExecutionPlugin +from landscape.client.manager.scriptexecution import UBUNTU_PATH +from landscape.client.manager.scriptexecution import UnknownInterpreterError +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import ManagerHelper from landscape.lib.fetch import HTTPCodeError from landscape.lib.persist import Persist -from landscape.lib.testing import StubProcessFactory, DummyProcess -from landscape.lib.user import get_user_info, UnknownUserError -from landscape.client.manager.scriptexecution import ( - ScriptExecutionPlugin, ProcessTimeLimitReachedError, PROCESS_FAILED_RESULT, - UBUNTU_PATH, UnknownInterpreterError, FETCH_ATTACHMENTS_FAILED_RESULT) -from landscape.client.manager.manager import SUCCEEDED, FAILED -from landscape.client.tests.helpers import LandscapeTest, ManagerHelper +from landscape.lib.testing import DummyProcess +from landscape.lib.testing import StubProcessFactory +from landscape.lib.user import get_user_info +from landscape.lib.user import UnknownUserError def get_default_environment(): @@ -38,7 +50,7 @@ def encoded_default_environment(): return { - key: value.encode('ascii', 'replace') + key: value.encode("ascii", "replace") for key, value in get_default_environment().items() } @@ -48,7 +60,7 @@ helpers = [ManagerHelper] def setUp(self): - super(RunScriptTests, self).setUp() + super().setUp() self.plugin = ScriptExecutionPlugin() self.manager.add(self.plugin) @@ -64,8 +76,7 @@ def test_snap_path(self): """The bin path for snaps is included in the PATH.""" 
deferred = self.plugin.run_script("/bin/sh", "echo $PATH") - deferred.addCallback( - lambda result: self.assertIn("/snap/bin", result)) + deferred.addCallback(lambda result: self.assertIn("/snap/bin", result)) return deferred def test_other_interpreter(self): @@ -81,7 +92,8 @@ """ result = self.plugin.run_script( sys.executable, - "import os\nprint(os.environ)") + "import os\nprint(os.environ)", + ) def check_environment(results): for string in get_default_environment(): @@ -99,7 +111,8 @@ result = self.plugin.run_script( sys.executable, "import os\nprint(os.environ)", - server_supplied_env=server_supplied_env) + server_supplied_env=server_supplied_env, + ) def check_environment(results): for string in get_default_environment(): @@ -118,20 +131,23 @@ (encoding from Python's sys.getfilesystemencoding). """ patch_fs_encoding = mock.patch( - 'sys.getfilesystemencoding', return_value='UTF-8' + "sys.getfilesystemencoding", + return_value="UTF-8", ) patch_fs_encoding.start() server_supplied_env = { - "LEMMY": u"Mot\N{LATIN SMALL LETTER O WITH DIAERESIS}rhead", + "LEMMY": "Mot\N{LATIN SMALL LETTER O WITH DIAERESIS}rhead", # Somehow it's just not as cool... } result = self.plugin.run_script( - "/bin/sh", "echo $LEMMY", - server_supplied_env=server_supplied_env) + "/bin/sh", + "echo $LEMMY", + server_supplied_env=server_supplied_env, + ) def check_environment(results): - self.assertEqual(server_supplied_env["LEMMY"] + u'\n', results) + self.assertEqual(server_supplied_env["LEMMY"] + "\n", results) def cleanup(result): patch_fs_encoding.stop() @@ -145,12 +161,16 @@ Server-supplied environment variables override client default values if the server provides them. """ - server_supplied_env = {"PATH": "server-path", "USER": "server-user", - "HOME": "server-home"} + server_supplied_env = { + "PATH": "server-path", + "USER": "server-user", + "HOME": "server-home", + } result = self.plugin.run_script( sys.executable, "import os\nprint(os.environ)", - server_supplied_env=server_supplied_env) + server_supplied_env=server_supplied_env, + ) def check_environment(results): for name, value in server_supplied_env.items(): @@ -181,26 +201,28 @@ Scripts can contain accented data both in the code and in the result. """ - accented_content = u"\N{LATIN SMALL LETTER E WITH ACUTE}" + accented_content = "\N{LATIN SMALL LETTER E WITH ACUTE}" result = self.plugin.run_script( - u"/bin/sh", u"echo %s" % (accented_content,)) + "/bin/sh", + f"echo {accented_content}", + ) # self.assertEqual gets the result as first argument and that's what we # compare against. - result.addCallback( - self.assertEqual, "%s\n" % (accented_content,)) + result.addCallback(self.assertEqual, f"{accented_content}\n") return result def test_accented_run_in_interpreter(self): """ Scripts can also contain accents in the interpreter. 
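Child-process environments are byte-oriented, so unicode values such as LEMMY above must be encoded before spawnProcess sees them. A sketch of that step, mirroring encoded_default_environment() and the patched sys.getfilesystemencoding in the test; the plugin's exact codec handling is not shown in this diff:

```python
import sys


def encode_environment(env):
    # Values are encoded with the filesystem encoding; undecodable
    # characters are replaced rather than raising, as in
    # encoded_default_environment() above.
    encoding = sys.getfilesystemencoding()
    return {key: value.encode(encoding, "replace") for key, value in env.items()}


encode_environment({"LEMMY": "Mot\N{LATIN SMALL LETTER O WITH DIAERESIS}rhead"})
```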
""" - accented_content = u"\N{LATIN SMALL LETTER E WITH ACUTE}" + accented_content = "\N{LATIN SMALL LETTER E WITH ACUTE}" result = self.plugin.run_script( - u"/bin/echo %s" % (accented_content,), u"") + f"/bin/echo {accented_content}", + "", + ) def check(result): - self.assertTrue( - "%s " % (accented_content,) in result) + self.assertTrue(f"{accented_content} " in result) result.addCallback(check) return result @@ -220,9 +242,10 @@ result = self.plugin.run_script("/bin/sh", "umask") def check(result): - self.assertEqual("%04o\n" % old_umask, result) + self.assertEqual(f"{old_umask:04o}\n", result) mock_umask.assert_has_calls( - [mock.call(0o22), mock.call(old_umask)]) + [mock.call(0o22), mock.call(old_umask)], + ) result.addCallback(check) return result.addCallback(lambda _: patch_umask.stop()) @@ -236,11 +259,16 @@ mock_umask = patch_umask.start() patch_mkdtemp = mock.patch( - "tempfile.mkdtemp", side_effect=OSError("Fail!")) + "tempfile.mkdtemp", + side_effect=OSError("Fail!"), + ) mock_mkdtemp = patch_mkdtemp.start() result = self.plugin.run_script( - "/bin/sh", "umask", attachments={u"file1": "some data"}) + "/bin/sh", + "umask", + attachments={"file1": "some data"}, + ) def check(error): self.assertIsInstance(error.value, OSError) @@ -257,9 +285,10 @@ def test_run_with_attachments(self): result = self.plugin.run_script( - u"/bin/sh", - u"ls $LANDSCAPE_ATTACHMENTS && cat $LANDSCAPE_ATTACHMENTS/file1", - attachments={u"file1": "some data"}) + "/bin/sh", + "ls $LANDSCAPE_ATTACHMENTS && cat $LANDSCAPE_ATTACHMENTS/file1", + attachments={"file1": "some data"}, + ) def check(result): self.assertEqual(result, "file1\nsome data") @@ -275,30 +304,37 @@ """ self.manager.config.url = "https://localhost/message-system" persist = Persist( - filename=os.path.join(self.config.data_path, "broker.bpickle")) + filename=os.path.join(self.config.data_path, "broker.bpickle"), + ) registration_persist = persist.root_at("registration") registration_persist.set("secure-id", "secure_id") persist.save() patch_fetch = mock.patch( - "landscape.client.manager.scriptexecution.fetch_async") + "landscape.client.manager.scriptexecution.fetch_async", + ) mock_fetch = patch_fetch.start() mock_fetch.return_value = succeed(b"some other data") - headers = {"User-Agent": "landscape-client/%s" % VERSION, - "Content-Type": "application/octet-stream", - "X-Computer-ID": "secure_id"} + headers = { + "User-Agent": f"landscape-client/{VERSION}", + "Content-Type": "application/octet-stream", + "X-Computer-ID": "secure_id", + } result = self.plugin.run_script( - u"/bin/sh", - u"ls $LANDSCAPE_ATTACHMENTS && cat $LANDSCAPE_ATTACHMENTS/file1", - attachments={u"file1": 14}) + "/bin/sh", + "ls $LANDSCAPE_ATTACHMENTS && cat $LANDSCAPE_ATTACHMENTS/file1", + attachments={"file1": 14}, + ) def check(result): self.assertEqual(result, "file1\nsome other data") mock_fetch.assert_called_with( - "https://localhost/attachment/14", headers=headers, - cainfo=None) + "https://localhost/attachment/14", + headers=headers, + cainfo=None, + ) def cleanup(result): patch_fetch.stop() @@ -315,30 +351,37 @@ self.manager.config.url = "https://localhost/message-system" self.manager.config.ssl_public_key = "/some/key" persist = Persist( - filename=os.path.join(self.config.data_path, "broker.bpickle")) + filename=os.path.join(self.config.data_path, "broker.bpickle"), + ) registration_persist = persist.root_at("registration") registration_persist.set("secure-id", b"secure_id") persist.save() patch_fetch = mock.patch( - 
"landscape.client.manager.scriptexecution.fetch_async") + "landscape.client.manager.scriptexecution.fetch_async", + ) mock_fetch = patch_fetch.start() mock_fetch.return_value = succeed(b"some other data") - headers = {"User-Agent": "landscape-client/%s" % VERSION, - "Content-Type": "application/octet-stream", - "X-Computer-ID": "secure_id"} + headers = { + "User-Agent": f"landscape-client/{VERSION}", + "Content-Type": "application/octet-stream", + "X-Computer-ID": "secure_id", + } result = self.plugin.run_script( - u"/bin/sh", - u"ls $LANDSCAPE_ATTACHMENTS && cat $LANDSCAPE_ATTACHMENTS/file1", - attachments={u"file1": 14}) + "/bin/sh", + "ls $LANDSCAPE_ATTACHMENTS && cat $LANDSCAPE_ATTACHMENTS/file1", + attachments={"file1": 14}, + ) def check(result): self.assertEqual(result, "file1\nsome other data") mock_fetch.assert_called_with( - "https://localhost/attachment/14", headers=headers, - cainfo="/some/key") + "https://localhost/attachment/14", + headers=headers, + cainfo="/some/key", + ) def cleanup(result): patch_fetch.stop() @@ -361,9 +404,10 @@ the script execution plugin tries to remove the attachments directory. """ result = self.plugin.run_script( - u"/bin/sh", - u"ls $LANDSCAPE_ATTACHMENTS && rm -r $LANDSCAPE_ATTACHMENTS", - attachments={u"file1": "some data"}) + "/bin/sh", + "ls $LANDSCAPE_ATTACHMENTS && rm -r $LANDSCAPE_ATTACHMENTS", + attachments={"file1": "some data"}, + ) def check(result): self.assertEqual(result, "file1\n") @@ -429,12 +473,12 @@ patch_getpwnam = mock.patch("pwd.getpwnam") mock_getpwnam = patch_getpwnam.start() - class pwnam(object): + class PwNam: pw_uid = 1234 pw_gid = 5678 pw_dir = self.makeFile() - mock_getpwnam.return_value = pwnam + mock_getpwnam.return_value = PwNam result = self._run_script("user", 1234, 5678, "/") @@ -459,13 +503,17 @@ factory = StubProcessFactory() self.plugin.process_factory = factory - result = self.plugin.run_script("/bin/sh", "echo hi", user=username, - attachments={u"file 1": "some data"}) + result = self.plugin.run_script( + "/bin/sh", + "echo hi", + user=username, + attachments={"file 1": "some data"}, + ) self.assertEqual(len(factory.spawns), 1) spawn = factory.spawns[0] self.assertIn("LANDSCAPE_ATTACHMENTS", spawn[3]) - attachment_dir = spawn[3]["LANDSCAPE_ATTACHMENTS"].decode('ascii') + attachment_dir = spawn[3]["LANDSCAPE_ATTACHMENTS"].decode("ascii") self.assertEqual(stat.S_IMODE(os.stat(attachment_dir).st_mode), 0o700) filename = os.path.join(attachment_dir, "file 1") self.assertEqual(stat.S_IMODE(os.stat(filename).st_mode), 0o600) @@ -480,7 +528,8 @@ self.assertEqual(data, "foobar") self.assertFalse(os.path.exists(attachment_dir)) mock_chown.assert_has_calls( - [mock.call(mock.ANY, uid, gid) for x in range(3)]) + [mock.call(mock.ANY, uid, gid) for x in range(3)], + ) def cleanup(result): patch_chown.stop() @@ -497,13 +546,15 @@ # Ultimately we assert that the resulting output is limited to # 1024 bytes and indicates its truncation. - result.addCallback(self.assertEqual, - ("x" * (1024 - 21)) + "\n**OUTPUT TRUNCATED**") + result.addCallback( + self.assertEqual, + ("x" * (1024 - 21)) + "\n**OUTPUT TRUNCATED**", + ) protocol = factory.spawns[0][0] # Push 2kB of output, so we trigger truncation. - protocol.childDataReceived(1, b"x" * (2*1024)) + protocol.childDataReceived(1, b"x" * (2 * 1024)) for fd in (0, 1, 2): protocol.childConnectionLost(fd) @@ -520,8 +571,10 @@ # Ultimately we assert that the resulting output is limited to # 1024 bytes and indicates its truncation. 
- result.addCallback(self.assertEqual, - ("x" * (1024 - 21)) + "\n**OUTPUT TRUNCATED**") + result.addCallback( + self.assertEqual, + ("x" * (1024 - 21)) + "\n**OUTPUT TRUNCATED**", + ) protocol = factory.spawns[0][0] # Push 1024 bytes of output, so we trigger truncation. @@ -606,8 +659,13 @@ @mock.patch("os.chmod") @mock.patch("tempfile.mkstemp") @mock.patch("os.fdopen") - def test_script_is_owned_by_user(self, mock_fdopen, mock_mkstemp, - mock_chmod, mock_chown): + def test_script_is_owned_by_user( + self, + mock_fdopen, + mock_mkstemp, + mock_chmod, + mock_chown, + ): """ This is a very white-box test. When a script is generated, it must be created such that data NEVER gets into it before the file has the @@ -636,7 +694,15 @@ mock_fdopen.side_effect = mock_fdopen_side_effect - def spawnProcess(protocol, filename, args, env, path, uid, gid): + def spawnProcess( # noqa: N802 + protocol, + filename, + args, + env, + path, + uid, + gid, + ): self.assertIsNone(uid) self.assertIsNone(gid) self.assertEqual(encoded_default_environment(), env) @@ -646,8 +712,11 @@ process_factory.spawnProcess = spawnProcess self.plugin.process_factory = process_factory - result = self.plugin.run_script("/bin/sh", "code", - user=pwd.getpwuid(uid)[0]) + result = self.plugin.run_script( + "/bin/sh", + "code", + user=pwd.getpwuid(uid)[0], + ) def check(_): mock_fdopen.assert_called_with(99, "wb") @@ -657,7 +726,8 @@ script_file.close.assert_called_with() self.assertEqual( [mock_mkstemp, mock_fdopen, mock_chmod, mock_chown], - called_mocks) + called_mocks, + ) return result.addCallback(check) @@ -671,7 +741,8 @@ mock_mkstemp.return_value = (fd, filename) d = self.plugin.run_script("/bin/sh", "true") return d.addCallback( - lambda _: self.assertFalse(os.path.exists(filename))) + lambda _: self.assertFalse(os.path.exists(filename)), + ) def test_unknown_interpreter(self): """ @@ -687,7 +758,9 @@ failure.trap(UnknownInterpreterError) self.assertEqual( failure.value.interpreter, - "/bin/cantpossiblyexist") + "/bin/cantpossiblyexist", + ) + return d.addCallback(cb).addErrback(eb) @@ -695,9 +768,10 @@ helpers = [ManagerHelper] def setUp(self): - super(ScriptExecutionMessageTests, self).setUp() + super().setUp() self.broker_service.message_store.set_accepted_types( - ["operation-result"]) + ["operation-result"], + ) self.manager.config.script_users = "ALL" def _verify_script(self, executable, interp, code): @@ -706,19 +780,27 @@ script has the correct content. 
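_verify_script below asserts the on-disk layout of a generated script: a shebang line for the interpreter followed by the code. A sketch of a writer producing that layout in the order test_script_is_owned_by_user pins down (mkstemp, fdopen, chmod, then write); ownership changes and the plugin's other bookkeeping are omitted:

```python
import os
import tempfile


def write_script(interpreter, code, mode=0o700):
    fd, path = tempfile.mkstemp()
    script = os.fdopen(fd, "w")
    os.chmod(path, mode)  # lock the file down before any data lands in it
    # os.chown(path, uid, gid) would follow here when switching users.
    script.write(f"#!{interpreter}\n{code}")
    script.close()
    return path
```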
""" data = open(executable, "r").read() - self.assertEqual(data, "#!%s\n%s" % (interp, code)) + self.assertEqual(data, f"#!{interp}\n{code}") - def _send_script(self, interpreter, code, operation_id=123, - user=pwd.getpwuid(os.getuid())[0], - time_limit=None, attachments={}, - server_supplied_env=None): - message = {"type": "execute-script", - "interpreter": interpreter, - "code": code, - "operation-id": operation_id, - "username": user, - "time-limit": time_limit, - "attachments": dict(attachments)} + def _send_script( + self, + interpreter, + code, + operation_id=123, + user=pwd.getpwuid(os.getuid())[0], + time_limit=None, + attachments={}, + server_supplied_env=None, + ): + message = { + "type": "execute-script", + "interpreter": interpreter, + "code": code, + "operation-id": operation_id, + "username": user, + "time-limit": time_limit, + "attachments": dict(attachments), + } if server_supplied_env: message["env"] = server_supplied_env return self.manager.dispatch_message(message) @@ -739,7 +821,9 @@ self._verify_script(factory.spawns[0][1], sys.executable, "print 'hi'") self.assertMessages( - self.broker_service.message_store.get_pending_messages(), []) + self.broker_service.message_store.get_pending_messages(), + [], + ) # Now let's simulate the completion of the process factory.spawns[0][0].childDataReceived(1, b"hi!\n") @@ -748,10 +832,15 @@ def got_result(r): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 123, - "status": SUCCEEDED, - "result-text": u"hi!\n"}]) + [ + { + "type": "operation-result", + "operation-id": 123, + "status": SUCCEEDED, + "result-text": "hi!\n", + }, + ], + ) result.addCallback(got_result) return result @@ -768,11 +857,13 @@ self.manager.add(ScriptExecutionPlugin(process_factory=factory)) - result = self._send_script(sys.executable, "print 'hi'", - server_supplied_env={"Dog": "Woof"}) + result = self._send_script( + sys.executable, + "print 'hi'", + server_supplied_env={"Dog": "Woof"}, + ) - self._verify_script( - factory.spawns[0][1], sys.executable, "print 'hi'") + self._verify_script(factory.spawns[0][1], sys.executable, "print 'hi'") # Verify environment was passed self.assertIn("HOME", factory.spawns[0][3]) self.assertIn("USER", factory.spawns[0][3]) @@ -781,7 +872,9 @@ self.assertEqual(b"Woof", factory.spawns[0][3]["Dog"]) self.assertMessages( - self.broker_service.message_store.get_pending_messages(), []) + self.broker_service.message_store.get_pending_messages(), + [], + ) # Now let's simulate the completion of the process factory.spawns[0][0].childDataReceived(1, b"Woof\n") @@ -790,10 +883,15 @@ def got_result(r): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 123, - "status": SUCCEEDED, - "result-text": u"Woof\n"}]) + [ + { + "type": "operation-result", + "operation-id": 123, + "status": SUCCEEDED, + "result-text": "Woof\n", + }, + ], + ) result.addCallback(got_result) return result @@ -803,7 +901,15 @@ username = pwd.getpwuid(os.getuid())[0] uid, gid, home = get_user_info(username) - def spawnProcess(protocol, filename, args, env, path, uid, gid): + def spawnProcess( # noqa: N802 + protocol, + filename, + args, + env, + path, + uid, + gid, + ): protocol.childDataReceived(1, "hi!\n") protocol.processEnded(Failure(ProcessDone(0))) self._verify_script(filename, sys.executable, "print 'hi'") @@ -811,14 +917,21 @@ process_factory = mock.Mock() process_factory.spawnProcess = 
mock.Mock(side_effect=spawnProcess) self.manager.add( - ScriptExecutionPlugin(process_factory=process_factory)) + ScriptExecutionPlugin(process_factory=process_factory), + ) result = self._send_script(sys.executable, "print 'hi'", user=username) def check(_): process_factory.spawnProcess.assert_called_with( - mock.ANY, mock.ANY, args=mock.ANY, uid=None, gid=None, - path=mock.ANY, env=encoded_default_environment()) + mock.ANY, + mock.ANY, + args=mock.ANY, + uid=None, + gid=None, + path=mock.ANY, + env=encoded_default_environment(), + ) return result.addCallback(check) @@ -832,17 +945,22 @@ unknown user as an easy way to test it. """ self.log_helper.ignore_errors(UnknownUserError) - username = u"non-existent-f\N{LATIN SMALL LETTER E WITH ACUTE}e" - self.manager.add( - ScriptExecutionPlugin()) + username = "non-existent-f\N{LATIN SMALL LETTER E WITH ACUTE}e" + self.manager.add(ScriptExecutionPlugin()) self._send_script(sys.executable, "print 'hi'", user=username) self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 123, - "result-text": u"UnknownUserError: Unknown user '%s'" % username, - "status": FAILED}]) + [ + { + "type": "operation-result", + "operation-id": 123, + "result-text": "UnknownUserError: " + f"Unknown user '{username}'", + "status": FAILED, + }, + ], + ) def test_timeout(self): """ @@ -864,11 +982,16 @@ def got_result(r): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 123, - "status": FAILED, - "result-text": u"ONOEZ", - "result-code": 102}]) + [ + { + "type": "operation-result", + "operation-id": 123, + "status": FAILED, + "result-text": "ONOEZ", + "result-code": 102, + }, + ], + ) result.addCallback(got_result) return result @@ -885,10 +1008,17 @@ def got_result(r): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 123, - "status": FAILED, - "result-text": u"Scripts cannot be run as user whatever."}]) + [ + { + "type": "operation-result", + "operation-id": 123, + "status": FAILED, + "result-text": ( + "Scripts cannot be run as user whatever." 
+ ), + }, + ], + ) result.addCallback(got_result) return result @@ -896,7 +1026,15 @@ def test_urgent_response(self): """Responses to script execution messages are urgent.""" - def spawnProcess(protocol, filename, args, env, path, uid, gid): + def spawnProcess( # noqa: N802 + protocol, + filename, + args, + env, + path, + uid, + gid, + ): protocol.childDataReceived(1, b"hi!\n") protocol.processEnded(Failure(ProcessDone(0))) self._verify_script(filename, sys.executable, "print 'hi'") @@ -905,19 +1043,31 @@ process_factory.spawnProcess = mock.Mock(side_effect=spawnProcess) self.manager.add( - ScriptExecutionPlugin(process_factory=process_factory)) + ScriptExecutionPlugin(process_factory=process_factory), + ) def got_result(r): self.assertTrue(self.broker_service.exchanger.is_urgent()) self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 123, - "result-text": u"hi!\n", - "status": SUCCEEDED}]) + [ + { + "type": "operation-result", + "operation-id": 123, + "result-text": "hi!\n", + "status": SUCCEEDED, + }, + ], + ) process_factory.spawnProcess.assert_called_with( - mock.ANY, mock.ANY, args=mock.ANY, uid=None, gid=None, - path=mock.ANY, env=encoded_default_environment()) + mock.ANY, + mock.ANY, + args=mock.ANY, + uid=None, + gid=None, + path=mock.ANY, + env=encoded_default_environment(), + ) result = self._send_script(sys.executable, "print 'hi'") return result.addCallback(got_result) @@ -927,9 +1077,20 @@ If a script outputs non-printable characters not handled by utf-8, they are replaced during the encoding phase but the script succeeds. """ - def spawnProcess(protocol, filename, args, env, path, uid, gid): + + def spawnProcess( # noqa: N802 + protocol, + filename, + args, + env, + path, + uid, + gid, + ): protocol.childDataReceived( - 1, b"\x7fELF\x01\x01\x01\x00\x00\x00\x95\x01") + 1, + b"\x7fELF\x01\x01\x01\x00\x00\x00\x95\x01", + ) protocol.processEnded(Failure(ProcessDone(0))) self._verify_script(filename, sys.executable, "print 'hi'") @@ -937,18 +1098,27 @@ process_factory.spawnProcess = mock.Mock(side_effect=spawnProcess) self.manager.add( - ScriptExecutionPlugin(process_factory=process_factory)) + ScriptExecutionPlugin(process_factory=process_factory), + ) def got_result(r): self.assertTrue(self.broker_service.exchanger.is_urgent()) - [message] = ( - self.broker_service.message_store.get_pending_messages()) + [ + message, + ] = self.broker_service.message_store.get_pending_messages() self.assertEqual( message["result-text"], - u"\x7fELF\x01\x01\x01\x00\x00\x00\ufffd\x01") + "\x7fELF\x01\x01\x01\x00\x00\x00\ufffd\x01", + ) process_factory.spawnProcess.assert_called_with( - mock.ANY, mock.ANY, args=mock.ANY, uid=None, gid=None, - path=mock.ANY, env=encoded_default_environment()) + mock.ANY, + mock.ANY, + args=mock.ANY, + uid=None, + gid=None, + path=mock.ANY, + env=encoded_default_environment(), + ) result = self._send_script(sys.executable, "print 'hi'") return result.addCallback(got_result) @@ -962,16 +1132,22 @@ self.manager.add(ScriptExecutionPlugin()) self.manager.dispatch_message( - {"type": "execute-script", "operation-id": 444}) + {"type": "execute-script", "operation-id": 444}, + ) - expected_message = [{"type": "operation-result", - "operation-id": 444, - "result-text": u"KeyError: username", - "status": FAILED}] + expected_message = [ + { + "type": "operation-result", + "operation-id": 444, + "result-text": "KeyError: username", + "status": FAILED, + }, + ] self.assertMessages( 
self.broker_service.message_store.get_pending_messages(), - expected_message) + expected_message, + ) self.assertTrue("KeyError: 'username'" in self.logfile.getvalue()) @@ -986,11 +1162,16 @@ def got_result(ignored): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 123, - "result-text": "hi\n", - "result-code": PROCESS_FAILED_RESULT, - "status": FAILED}]) + [ + { + "type": "operation-result", + "operation-id": 123, + "result-text": "hi\n", + "result-code": PROCESS_FAILED_RESULT, + "status": FAILED, + }, + ], + ) return result.addCallback(got_result) @@ -1008,7 +1189,9 @@ self._verify_script(factory.spawns[0][1], sys.executable, "print 'hi'") self.assertMessages( - self.broker_service.message_store.get_pending_messages(), []) + self.broker_service.message_store.get_pending_messages(), + [], + ) failure = Failure(RuntimeError("Oh noes!")) factory.spawns[0][0].result_deferred.errback(failure) @@ -1016,10 +1199,15 @@ def got_result(r): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 123, - "status": FAILED, - "result-text": str(failure)}]) + [ + { + "type": "operation-result", + "operation-id": 123, + "status": FAILED, + "result-text": str(failure), + }, + ], + ) result.addCallback(got_result) return result @@ -1032,30 +1220,43 @@ """ self.manager.config.url = "https://localhost/message-system" persist = Persist( - filename=os.path.join(self.config.data_path, "broker.bpickle")) + filename=os.path.join(self.config.data_path, "broker.bpickle"), + ) registration_persist = persist.root_at("registration") registration_persist.set("secure-id", "secure_id") persist.save() - headers = {"User-Agent": "landscape-client/%s" % VERSION, - "Content-Type": "application/octet-stream", - "X-Computer-ID": "secure_id"} + headers = { + "User-Agent": f"landscape-client/{VERSION}", + "Content-Type": "application/octet-stream", + "X-Computer-ID": "secure_id", + } mock_fetch.return_value = fail(HTTPCodeError(404, "Not found")) self.manager.add(ScriptExecutionPlugin()) result = self._send_script( - "/bin/sh", "echo hi", attachments={u"file1": 14}) + "/bin/sh", + "echo hi", + attachments={"file1": 14}, + ) def got_result(ignored): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 123, - "result-text": "Server returned HTTP code 404", - "result-code": FETCH_ATTACHMENTS_FAILED_RESULT, - "status": FAILED}]) + [ + { + "type": "operation-result", + "operation-id": 123, + "result-text": "Server returned HTTP code 404", + "result-code": FETCH_ATTACHMENTS_FAILED_RESULT, + "status": FAILED, + }, + ], + ) mock_fetch.assert_called_with( - "https://localhost/attachment/14", headers=headers, - cainfo=None) + "https://localhost/attachment/14", + headers=headers, + cainfo=None, + ) return result.addCallback(got_result) diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_service.py landscape-client-23.08/landscape/client/manager/tests/test_service.py --- landscape-client-23.02/landscape/client/manager/tests/test_service.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_service.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,9 +1,10 @@ -from landscape.client.tests.helpers import ( - LandscapeTest, FakeBrokerServiceHelper) -from landscape.lib.testing import FakeReactor -from landscape.client.manager.config import ManagerConfiguration, 
ALL_PLUGINS -from landscape.client.manager.service import ManagerService +from landscape.client.manager.config import ALL_PLUGINS +from landscape.client.manager.config import ManagerConfiguration from landscape.client.manager.processkiller import ProcessKiller +from landscape.client.manager.service import ManagerService +from landscape.client.tests.helpers import FakeBrokerServiceHelper +from landscape.client.tests.helpers import LandscapeTest +from landscape.lib.testing import FakeReactor class ManagerServiceTest(LandscapeTest): @@ -11,7 +12,7 @@ helpers = [FakeBrokerServiceHelper] def setUp(self): - super(ManagerServiceTest, self).setUp() + super().setUp() config = ManagerConfiguration() config.load(["-c", self.config_filename]) @@ -41,6 +42,7 @@ The L{ManagerService.startService} method connects to the broker, starts the plugins and register the manager as broker client. """ + def stop_service(ignored): for plugin in self.service.plugins: if getattr(plugin, "stop", None) is not None: diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_shutdownmanager.py landscape-client-23.08/landscape/client/manager/tests/test_shutdownmanager.py --- landscape-client-23.02/landscape/client/manager/tests/test_shutdownmanager.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_shutdownmanager.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,15 @@ -from twisted.python.failure import Failure -from twisted.internet.error import ProcessTerminated, ProcessDone +from twisted.internet.error import ProcessDone +from twisted.internet.error import ProcessTerminated from twisted.internet.protocol import ProcessProtocol +from twisted.python.failure import Failure +from landscape.client.manager.plugin import FAILED +from landscape.client.manager.plugin import SUCCEEDED +from landscape.client.manager.shutdownmanager import ShutdownManager +from landscape.client.manager.shutdownmanager import ShutdownProcessProtocol +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import ManagerHelper from landscape.lib.testing import StubProcessFactory -from landscape.client.manager.plugin import SUCCEEDED, FAILED -from landscape.client.manager.shutdownmanager import ( - ShutdownManager, ShutdownProcessProtocol) -from landscape.client.tests.helpers import LandscapeTest, ManagerHelper class ShutdownManagerTest(LandscapeTest): @@ -14,9 +17,10 @@ helpers = [ManagerHelper] def setUp(self): - super(ShutdownManagerTest, self).setUp() + super().setUp() self.broker_service.message_store.set_accepted_types( - ["shutdown", "operation-result"]) + ["shutdown", "operation-result"], + ) self.broker_service.pinger.start() self.process_factory = StubProcessFactory() self.plugin = ShutdownManager(process_factory=self.process_factory) @@ -37,16 +41,32 @@ self.assertTrue(isinstance(protocol, ShutdownProcessProtocol)) self.assertEqual( arguments[1:3], - ("/sbin/shutdown", ["/sbin/shutdown", "-r", "+4", - "Landscape is rebooting the system"])) + ( + "/sbin/shutdown", + [ + "/sbin/shutdown", + "-r", + "+4", + "Landscape is rebooting the system", + ], + ), + ) def restart_performed(ignore): self.assertTrue(self.broker_service.exchanger.is_urgent()) self.assertEqual( self.broker_service.message_store.get_pending_messages(), - [{"type": "operation-result", "api": b"3.2", - "operation-id": 100, "timestamp": 10, "status": SUCCEEDED, - "result-text": u"Data may arrive in batches."}]) + [ + { + "type": "operation-result", + "api": b"3.2", + 
"operation-id": 100, + "timestamp": 10, + "status": SUCCEEDED, + "result-text": "Data may arrive in batches.", + }, + ], + ) protocol.result.addCallback(restart_performed) protocol.childDataReceived(0, b"Data may arrive ") @@ -68,8 +88,16 @@ [arguments] = self.process_factory.spawns self.assertEqual( arguments[1:3], - ("/sbin/shutdown", ["/sbin/shutdown", "-h", "+4", - "Landscape is shutting down the system"])) + ( + "/sbin/shutdown", + [ + "/sbin/shutdown", + "-h", + "+4", + "Landscape is shutting down the system", + ], + ), + ) def test_restart_fails(self): """ @@ -90,15 +118,17 @@ self.assertEqual(message["operation-id"], 100) self.assertEqual(message["timestamp"], 0) self.assertEqual(message["status"], FAILED) - self.assertIn(u"Failure text is reported.", message["result-text"]) + self.assertIn("Failure text is reported.", message["result-text"]) # Check that after failing, we attempt to force the shutdown by # switching the binary called [spawn1_args, spawn2_args] = self.process_factory.spawns protocol = spawn2_args[0] self.assertIsInstance(protocol, ProcessProtocol) - self.assertEqual(spawn2_args[1:3], - ("/sbin/reboot", ["/sbin/reboot"])) + self.assertEqual( + spawn2_args[1:3], + ("/sbin/reboot", ["/sbin/reboot"]), + ) [arguments] = self.process_factory.spawns protocol = arguments[0] diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_snapmanager.py landscape-client-23.08/landscape/client/manager/tests/test_snapmanager.py --- landscape-client-23.02/landscape/client/manager/tests/test_snapmanager.py 1970-01-01 00:00:00.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_snapmanager.py 2023-08-17 21:19:32.000000000 +0000 @@ -0,0 +1,259 @@ +from unittest import mock + +from landscape.client.manager.manager import FAILED +from landscape.client.manager.manager import SUCCEEDED +from landscape.client.manager.snapmanager import SnapManager +from landscape.client.snap.http import SnapdHttpException +from landscape.client.snap.http import SnapHttp as OrigSnapHttp +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import ManagerHelper + + +class SnapManagerTest(LandscapeTest): + helpers = [ManagerHelper] + + def setUp(self): + super().setUp() + + self.snap_http = mock.Mock(spec_set=OrigSnapHttp) + self.SnapHttp = mock.patch( + "landscape.client.manager.snapmanager.SnapHttp", + ).start() + + self.SnapHttp.return_value = self.snap_http + + self.broker_service.message_store.set_accepted_types( + ["operation-result"], + ) + self.plugin = SnapManager() + self.manager.add(self.plugin) + + self.manager.config.snapd_poll_attempts = 2 + self.manager.config.snapd_poll_interval = 0.1 + + def tearDown(self): + mock.patch.stopall() + + def test_install_snaps(self): + """ + When at least one channel or revision is specified, snaps are + installed via one call to snapd per snap. 
+ """ + + def install_snap(name, revision=None, channel=None, classic=False): + if name == "hello": + return {"change": "1"} + + if name == "goodbye": + return {"change": "2"} + + return mock.DEFAULT + + self.snap_http.install_snap.side_effect = install_snap + self.snap_http.check_changes.return_value = { + "result": [ + {"id": "1", "status": "Done"}, + {"id": "2", "status": "Done"}, + ], + } + self.snap_http.get_snaps.return_value = {"installed": []} + + result = self.manager.dispatch_message( + { + "type": "install-snaps", + "operation-id": 123, + "snaps": [ + {"name": "hello", "revision": 9001}, + {"name": "goodbye"}, + ], + }, + ) + + def got_result(r): + self.assertMessages( + self.broker_service.message_store.get_pending_messages(), + [ + { + "type": "operation-result", + "status": SUCCEEDED, + "result-text": "{'completed': ['hello', 'goodbye'], " + "'errored': [], 'errors': {}}", + "operation-id": 123, + }, + ], + ) + + return result.addCallback(got_result) + + def test_install_snaps_batch(self): + """ + When no channels or revisions are specified, snaps are installed + via a single call to snapd. + """ + self.snap_http.install_snaps.return_value = {"change": "1"} + self.snap_http.check_changes.return_value = { + "result": [{"id": "1", "status": "Done"}], + } + self.snap_http.get_snaps.return_value = { + "installed": [ + { + "name": "hello", + "id": "test", + "confinement": "strict", + "tracking-channel": "latest/stable", + "revision": "100", + "publisher": {"validation": "yep", "username": "me"}, + "version": "1.2.3", + }, + ], + } + + result = self.manager.dispatch_message( + { + "type": "install-snaps", + "operation-id": 123, + "snaps": [ + {"name": "hello"}, + {"name": "goodbye"}, + ], + }, + ) + + def got_result(r): + self.assertMessages( + self.broker_service.message_store.get_pending_messages(), + [ + { + "type": "operation-result", + "status": SUCCEEDED, + "result-text": "{'completed': ['BATCH'], 'errored': " + "[], 'errors': {}}", + "operation-id": 123, + }, + ], + ) + + return result.addCallback(got_result) + + def test_install_snap_immediate_error(self): + self.snap_http.install_snaps.side_effect = SnapdHttpException( + b'{"result": "whoops"}', + ) + self.snap_http.get_snaps.return_value = {"installed": []} + + result = self.manager.dispatch_message( + { + "type": "install-snaps", + "operation-id": 123, + "snaps": [{"name": "hello"}], + }, + ) + + self.log_helper.ignore_errors(r".+whoops$") + + def got_result(r): + self.assertMessages( + self.broker_service.message_store.get_pending_messages(), + [ + { + "type": "operation-result", + "status": FAILED, + "result-text": "{'completed': [], 'errored': [], " + "'errors': {'BATCH': 'whoops'}}", + "operation-id": 123, + }, + ], + ) + + return result.addCallback(got_result) + + def test_install_snap_no_status(self): + self.snap_http.install_snaps.return_value = {"change": "1"} + self.snap_http.check_changes.return_value = {"result": []} + self.snap_http.get_snaps.return_value = {"installed": []} + + result = self.manager.dispatch_message( + { + "type": "install-snaps", + "operation-id": 123, + "snaps": [{"name": "hello"}], + }, + ) + + def got_result(r): + self.assertMessages( + self.broker_service.message_store.get_pending_messages(), + [ + { + "type": "operation-result", + "status": FAILED, + "result-text": "{'completed': [], 'errored': ['BATCH']" + ", 'errors': {'BATCH': 'Unknown'}}", + "operation-id": 123, + }, + ], + ) + + return result.addCallback(got_result) + + def test_install_snap_check_error(self): + 
self.snap_http.install_snaps.return_value = {"change": "1"} + self.snap_http.check_changes.side_effect = SnapdHttpException("whoops") + self.snap_http.get_snaps.return_value = {"installed": []} + + result = self.manager.dispatch_message( + { + "type": "install-snaps", + "operation-id": 123, + "snaps": [{"name": "hello"}], + }, + ) + + self.log_helper.ignore_errors(r".+whoops$") + + def got_result(r): + self.assertMessages( + self.broker_service.message_store.get_pending_messages(), + [ + { + "type": "operation-result", + "status": FAILED, + "result-text": "{'completed': [], 'errored': ['BATCH']" + ", 'errors': {'BATCH': 'whoops'}}", + "operation-id": 123, + }, + ], + ) + + return result.addCallback(got_result) + + def test_remove_snap(self): + self.snap_http.remove_snaps.return_value = {"change": "1"} + self.snap_http.check_changes.return_value = { + "result": [{"id": "1", "status": "Done"}], + } + self.snap_http.get_snaps.return_value = {"installed": []} + + result = self.manager.dispatch_message( + { + "type": "remove-snaps", + "operation-id": 123, + "snaps": [{"name": "hello"}], + }, + ) + + def got_result(r): + self.assertMessages( + self.broker_service.message_store.get_pending_messages(), + [ + { + "type": "operation-result", + "status": SUCCEEDED, + "result-text": "{'completed': ['BATCH'], 'errored': []" + ", 'errors': {}}", + "operation-id": 123, + }, + ], + ) + + return result.addCallback(got_result) diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_store.py landscape-client-23.08/landscape/client/manager/tests/test_store.py --- landscape-client-23.02/landscape/client/manager/tests/test_store.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_store.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,15 +1,13 @@ -from landscape.client.tests.helpers import LandscapeTest - from landscape.client.manager.store import ManagerStore +from landscape.client.tests.helpers import LandscapeTest class ManagerStoreTest(LandscapeTest): - def setUp(self): - super(ManagerStoreTest, self).setUp() + super().setUp() self.filename = self.makeFile() self.store = ManagerStore(self.filename) - self.store.add_graph(1, u"file 1", u"user1") + self.store.add_graph(1, "file 1", "user1") self.store.set_graph_accumulate(1, 1234, 1.0) def test_get_unknown_graph(self): @@ -18,11 +16,11 @@ def test_get_graph(self): graph = self.store.get_graph(1) - self.assertEqual(graph, (1, u"file 1", u"user1")) + self.assertEqual(graph, (1, "file 1", "user1")) def test_get_graphs(self): graphs = self.store.get_graphs() - self.assertEqual(graphs, [(1, u"file 1", u"user1")]) + self.assertEqual(graphs, [(1, "file 1", "user1")]) def test_get_no_graphs(self): self.store.remove_graph(1) @@ -30,14 +28,14 @@ self.assertEqual(graphs, []) def test_add_graph(self): - self.store.add_graph(2, u"file 2", u"user2") + self.store.add_graph(2, "file 2", "user2") graph = self.store.get_graph(2) - self.assertEqual(graph, (2, u"file 2", u"user2")) + self.assertEqual(graph, (2, "file 2", "user2")) def test_add_update_graph(self): - self.store.add_graph(1, u"file 2", u"user2") + self.store.add_graph(1, "file 2", "user2") graph = self.store.get_graph(1) - self.assertEqual(graph, (1, u"file 2", u"user2")) + self.assertEqual(graph, (1, "file 2", "user2")) def test_remove_graph(self): self.store.remove_graph(1) @@ -47,7 +45,7 @@ def test_remove_unknown_graph(self): self.store.remove_graph(2) graphs = self.store.get_graphs() - self.assertEqual(graphs, [(1, u"file 1", u"user1")]) + 
self.assertEqual(graphs, [(1, "file 1", "user1")]) def test_get_accumulate_unknown_graph(self): accumulate = self.store.get_graph_accumulate(2) diff -Nru landscape-client-23.02/landscape/client/manager/tests/test_usermanager.py landscape-client-23.08/landscape/client/manager/tests/test_usermanager.py --- landscape-client-23.02/landscape/client/manager/tests/test_usermanager.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/tests/test_usermanager.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,17 +1,18 @@ -# -*- coding: utf-8 -*- import os -from mock import Mock +from unittest.mock import Mock -from landscape.lib.persist import Persist -from landscape.lib.twisted_util import gather_results -from landscape.client.manager.plugin import SUCCEEDED, FAILED +from landscape.client.manager.plugin import FAILED +from landscape.client.manager.plugin import SUCCEEDED +from landscape.client.manager.usermanager import RemoteUserManagerConnector +from landscape.client.manager.usermanager import UserManager from landscape.client.monitor.usermonitor import UserMonitor -from landscape.client.manager.usermanager import ( - UserManager, RemoteUserManagerConnector) -from landscape.client.user.tests.helpers import ( - FakeUserProvider, FakeUserManagement) -from landscape.client.tests.helpers import LandscapeTest, ManagerHelper +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import ManagerHelper from landscape.client.user.provider import UserManagementError +from landscape.client.user.tests.helpers import FakeUserManagement +from landscape.client.user.tests.helpers import FakeUserProvider +from landscape.lib.persist import Persist +from landscape.lib.twisted_util import gather_results class UserGroupTestBase(LandscapeTest): @@ -19,27 +20,34 @@ helpers = [ManagerHelper] def setUp(self): - super(UserGroupTestBase, self).setUp() - self.shadow_file = self.makeFile("""\ + super().setUp() + self.shadow_file = self.makeFile( + """\ jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7::: psmith:!:13348:0:99999:7::: sbarnes:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7::: -""") +""", + ) accepted_types = ["operation-result", "users"] self.broker_service.message_store.set_accepted_types(accepted_types) def tearDown(self): - super(UserGroupTestBase, self).tearDown() + super().tearDown() for plugin in self.plugins: plugin.stop() def setup_environment(self, users, groups, shadow_file): - provider = FakeUserProvider(users=users, groups=groups, - shadow_file=shadow_file) + provider = FakeUserProvider( + users=users, + groups=groups, + shadow_file=shadow_file, + ) user_monitor = UserMonitor(provider=provider) management = FakeUserManagement(provider=provider) - user_manager = UserManager(management=management, - shadow_file=shadow_file) + user_manager = UserManager( + management=management, + shadow_file=shadow_file, + ) self.manager.persist = Persist() user_monitor.register(self.manager) user_manager.register(self.manager) @@ -48,7 +56,6 @@ class UserOperationsMessagingTest(UserGroupTestBase): - def test_add_user_event(self): """ When an C{add-user} event is received the user should be @@ -59,30 +66,52 @@ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() - self.assertMessages(messages, - [{"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 123, "timestamp": 0, - "result-text": "add_user succeeded"}, - {"timestamp": 0, "type": "users", - "operation-id": 123, - 
"create-users": [{"home-phone": None, - "username": "jdoe", - "uid": 1000, - "enabled": True, - "location": "Room 101", - "work-phone": "+12345", - "name": u"John Doe", - "primary-gid": 1000}]}]) + self.assertMessages( + messages, + [ + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 123, + "timestamp": 0, + "result-text": "add_user succeeded", + }, + { + "timestamp": 0, + "type": "users", + "operation-id": 123, + "create-users": [ + { + "home-phone": None, + "username": "jdoe", + "uid": 1000, + "enabled": True, + "location": "Room 101", + "work-phone": "+12345", + "name": "John Doe", + "primary-gid": 1000, + }, + ], + }, + ], + ) self.setup_environment([], [], None) result = self.manager.dispatch_message( - {"username": "jdoe", "name": "John Doe", "password": "password", - "operation-id": 123, "require-password-reset": False, - "primary-group-name": None, "location": "Room 101", - "work-number": "+12345", "home-number": None, - "type": "add-user"}) + { + "username": "jdoe", + "name": "John Doe", + "password": "password", + "operation-id": 123, + "require-password-reset": False, + "primary-group-name": None, + "location": "Room 101", + "work-number": "+12345", + "home-number": None, + "type": "add-user", + }, + ) result.addCallback(handle_callback) return result @@ -92,32 +121,55 @@ When an C{add-user} event with utf-8 unicode strings is received the user should be added. """ + def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() - self.assertMessages(messages, - [{"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 123, "timestamp": 0, - "result-text": "add_user succeeded"}, - {"timestamp": 0, "type": "users", - "operation-id": 123, - "create-users": [{"home-phone": None, - "username": "jdoe", - "uid": 1000, - "enabled": True, - "location": "Room 101", - "work-phone": "+12345", - "name": u"請不要刪除", - "primary-gid": 1000}]}]) + self.assertMessages( + messages, + [ + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 123, + "timestamp": 0, + "result-text": "add_user succeeded", + }, + { + "timestamp": 0, + "type": "users", + "operation-id": 123, + "create-users": [ + { + "home-phone": None, + "username": "jdoe", + "uid": 1000, + "enabled": True, + "location": "Room 101", + "work-phone": "+12345", + "name": "請不要刪除", + "primary-gid": 1000, + }, + ], + }, + ], + ) self.setup_environment([], [], None) result = self.manager.dispatch_message( - {"username": "jdoe", "name": "請不要刪除", "password": "password", - "operation-id": 123, "require-password-reset": False, - "primary-group-name": None, "location": "Room 101", - "work-number": "+12345", "home-number": None, - "type": "add-user"}) + { + "username": "jdoe", + "name": "請不要刪除", + "password": "password", + "operation-id": 123, + "require-password-reset": False, + "primary-group-name": None, + "location": "Room 101", + "work-number": "+12345", + "home-number": None, + "type": "add-user", + }, + ) result.addCallback(handle_callback) return result @@ -128,38 +180,55 @@ received the user should be added. This is what the server is sending over the wire in the real-world. 
""" + def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() - self.assertMessages(messages, - [{"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 123, "timestamp": 0, - "result-text": "add_user succeeded"}, - {"timestamp": 0, "type": "users", - "operation-id": 123, - "create-users": [ - {"home-phone": u"請不要刪除", - "username": u"請不要刪除", - "uid": 1000, - "enabled": True, - "location": u"請不要刪除", - "work-phone": u"請不要刪除", - "name": u"請不要刪除", - "primary-gid": 1000}]}]) + self.assertMessages( + messages, + [ + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 123, + "timestamp": 0, + "result-text": "add_user succeeded", + }, + { + "timestamp": 0, + "type": "users", + "operation-id": 123, + "create-users": [ + { + "home-phone": "請不要刪除", + "username": "請不要刪除", + "uid": 1000, + "enabled": True, + "location": "請不要刪除", + "work-phone": "請不要刪除", + "name": "請不要刪除", + "primary-gid": 1000, + }, + ], + }, + ], + ) self.setup_environment([], [], None) result = self.manager.dispatch_message( - {'username': u'\u8acb\u4e0d\u8981\u522a\u9664', - 'work-number': u'\u8acb\u4e0d\u8981\u522a\u9664', - 'home-number': u'\u8acb\u4e0d\u8981\u522a\u9664', - 'name': u'\u8acb\u4e0d\u8981\u522a\u9664', - 'operation-id': 123, - 'require-password-reset': False, - 'password': u'\u8acb\u4e0d\u8981\u522a\u9664', - 'type': 'add-user', - 'primary-group-name': u'\u8acb\u4e0d\u8981\u522a\u9664', - 'location': u'\u8acb\u4e0d\u8981\u522a\u9664'}) + { + "username": "\u8acb\u4e0d\u8981\u522a\u9664", + "work-number": "\u8acb\u4e0d\u8981\u522a\u9664", + "home-number": "\u8acb\u4e0d\u8981\u522a\u9664", + "name": "\u8acb\u4e0d\u8981\u522a\u9664", + "operation-id": 123, + "require-password-reset": False, + "password": "\u8acb\u4e0d\u8981\u522a\u9664", + "type": "add-user", + "primary-group-name": "\u8acb\u4e0d\u8981\u522a\u9664", + "location": "\u8acb\u4e0d\u8981\u522a\u9664", + }, + ) result.addCallback(handle_callback) return result @@ -174,16 +243,30 @@ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() - self.assertMessages(messages, - [{"type": "operation-result", "status": FAILED, - "operation-id": 123, "timestamp": 0, - "result-text": "KeyError: 'username'"}]) + self.assertMessages( + messages, + [ + { + "type": "operation-result", + "status": FAILED, + "operation-id": 123, + "timestamp": 0, + "result-text": "KeyError: 'username'", + }, + ], + ) self.setup_environment([], [], None) result = self.manager.dispatch_message( - {"name": "John Doe", "password": "password", "operation-id": 123, - "require-password-reset": False, "type": "add-user"}) + { + "name": "John Doe", + "password": "password", + "operation-id": 123, + "require-password-reset": False, + "type": "add-user", + }, + ) result.addCallback(handle_callback) return result @@ -210,10 +293,19 @@ plugin = self.setup_environment([], [], None) result = self.manager.dispatch_message( - {"username": "jdoe", "name": "John Doe", "password": "password", - "operation-id": 123, "require-password-reset": False, - "primary-group-name": None, "type": "add-user", - "location": None, "home-number": "+123456", "work-number": None}) + { + "username": "jdoe", + "name": "John Doe", + "password": "password", + "operation-id": 123, + "require-password-reset": False, + "primary-group-name": None, + "type": "add-user", + "location": None, + "home-number": "+123456", + "work-number": None, + }, + ) result.addCallback(handle_callback1) return result @@ -230,33 +322,59 @@ 
messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) messages = [messages[0], messages[2]] - self.assertMessages(messages, - [{"type": "users", - "create-users": [{"home-phone": None, - "name": "Bo", - "username": "bo", - "uid": 1000, - "enabled": True, - "location": None, - "primary-gid": 1000, - "work-phone": None}]}, - {"type": "users", "operation-id": 123, - "create-users": [{"home-phone": "+123456", - "username": "jdoe", - "uid": 1001, - "enabled": True, - "location": None, - "work-phone": None, - "name": "John Doe", - "primary-gid": 1001}]}]) + self.assertMessages( + messages, + [ + { + "type": "users", + "create-users": [ + { + "home-phone": None, + "name": "Bo", + "username": "bo", + "uid": 1000, + "enabled": True, + "location": None, + "primary-gid": 1000, + "work-phone": None, + }, + ], + }, + { + "type": "users", + "operation-id": 123, + "create-users": [ + { + "home-phone": "+123456", + "username": "jdoe", + "uid": 1001, + "enabled": True, + "location": None, + "work-phone": None, + "name": "John Doe", + "primary-gid": 1001, + }, + ], + }, + ], + ) users = [("bo", "x", 1000, 1000, "Bo,,,,", "/home/bo", "/bin/zsh")] self.setup_environment(users, [], None) result = self.manager.dispatch_message( - {"username": "jdoe", "name": "John Doe", "password": "password", - "operation-id": 123, "require-password-reset": False, - "type": "add-user", "primary-group-name": None, - "location": None, "work-number": None, "home-number": "+123456"}) + { + "username": "jdoe", + "name": "John Doe", + "password": "password", + "operation-id": 123, + "require-password-reset": False, + "type": "add-user", + "primary-group-name": None, + "location": None, + "work-number": None, + "home-number": "+123456", + }, + ) result.addCallback(handle_callback) return result @@ -272,32 +390,55 @@ messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) # Ignore the message created by plugin.run. 
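Every test in this module follows the same Twisted trial idiom: dispatch_message() hands back a Deferred, the assertions live in a callback, and the test returns the Deferred so trial waits for the callback before passing or failing. Stripped to its bones (dispatch_message below is a stand-in, not the real manager API):

    from twisted.internet import defer
    from twisted.trial import unittest

    class DeferredIdiom(unittest.TestCase):
        def dispatch_message(self, message):
            # Stand-in: the real method routes the message to a plugin
            # handler and returns a Deferred firing with its result.
            return defer.succeed(message["operation-id"])

        def test_assertions_live_in_the_callback(self):
            result = self.dispatch_message(
                {"type": "add-user", "operation-id": 123},
            )

            def handle_callback(operation_id):
                self.assertEqual(operation_id, 123)

            # Returning the Deferred makes trial wait for the callback.
            return result.addCallback(handle_callback)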
- self.assertMessages(messages[1:], - [{"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 99, "timestamp": 0, - "result-text": "set_user_details succeeded"}, - {"update-users": [{"username": "jdoe", - "uid": 1001, - "enabled": True, - "work-phone": "789WORK", - "home-phone": "123HOME", - "location": "Everywhere", - "name": "John Doe", - "primary-gid": 1001}], - "timestamp": 0, "type": "users", - "operation-id": 99}]) - - users = [("jdoe", "x", 1001, 1000, "John Doe,,,,", - "/home/bo", "/bin/zsh")] + self.assertMessages( + messages[1:], + [ + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 99, + "timestamp": 0, + "result-text": "set_user_details succeeded", + }, + { + "update-users": [ + { + "username": "jdoe", + "uid": 1001, + "enabled": True, + "work-phone": "789WORK", + "home-phone": "123HOME", + "location": "Everywhere", + "name": "John Doe", + "primary-gid": 1001, + }, + ], + "timestamp": 0, + "type": "users", + "operation-id": 99, + }, + ], + ) + + users = [ + ("jdoe", "x", 1001, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh"), + ] groups = [("users", "x", 1001, [])] self.setup_environment(users, groups, None) result = self.manager.dispatch_message( - {"uid": 1001, "username": "jdoe", "password": "password", - "name": "John Doe", "location": "Everywhere", - "work-number": "789WORK", "home-number": "123HOME", - "operation-id": 99, "primary-group-name": u"users", - "type": "edit-user"}) + { + "uid": 1001, + "username": "jdoe", + "password": "password", + "name": "John Doe", + "location": "Everywhere", + "work-number": "789WORK", + "home-number": "123HOME", + "operation-id": 99, + "primary-group-name": "users", + "type": "edit-user", + }, + ) result.addCallback(handle_callback) return result @@ -321,14 +462,23 @@ self.assertEqual(messages, new_messages) return result - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", - "/home/bo", "/bin/zsh")] + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh"), + ] plugin = self.setup_environment(users, [], None) result = self.manager.dispatch_message( - {"username": "jdoe", "password": "password", "name": "John Doe", - "location": "Everywhere", "work-number": "789WORK", - "home-number": "123HOME", "primary-group-name": None, - "type": "edit-user", "operation-id": 99}) + { + "username": "jdoe", + "password": "password", + "name": "John Doe", + "location": "Everywhere", + "work-number": "789WORK", + "home-number": "123HOME", + "primary-group-name": None, + "type": "edit-user", + "operation-id": 99, + }, + ) result.addCallback(handle_callback1) return result @@ -343,39 +493,63 @@ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) - self.assertMessages([messages[0], messages[2]], - [{"type": "users", - "create-group-members": {u"users": - [u"jdoe"]}, - "create-groups": [{"gid": 1001, - "name": u"users"}], - "create-users": [{"home-phone": None, - "work-phone": None, - "username": "jdoe", - "uid": 1000, - "enabled": True, - "location": None, - "name": "John Doe", - "primary-gid": 1000}]}, - {"type": "users", "operation-id": 99, - "update-users": [{"username": "jdoe", - "uid": 1000, - "enabled": True, - "work-phone": "789WORK", - "home-phone": "123HOME", - "location": "Everywhere", - "primary-gid": 1001, - "name": "John Doe"}]}]) - - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", - "/bin/zsh")] + self.assertMessages( + [messages[0], messages[2]], + [ + { + "type": "users", + 
"create-group-members": {"users": ["jdoe"]}, + "create-groups": [{"gid": 1001, "name": "users"}], + "create-users": [ + { + "home-phone": None, + "work-phone": None, + "username": "jdoe", + "uid": 1000, + "enabled": True, + "location": None, + "name": "John Doe", + "primary-gid": 1000, + }, + ], + }, + { + "type": "users", + "operation-id": 99, + "update-users": [ + { + "username": "jdoe", + "uid": 1000, + "enabled": True, + "work-phone": "789WORK", + "home-phone": "123HOME", + "location": "Everywhere", + "primary-gid": 1001, + "name": "John Doe", + }, + ], + }, + ], + ) + + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh"), + ] groups = [("users", "x", 1001, ["jdoe"])] self.setup_environment(users, groups, None) result = self.manager.dispatch_message( - {"username": "jdoe", "password": "password", "name": "John Doe", - "location": "Everywhere", "work-number": "789WORK", - "home-number": "123HOME", "primary-group-name": u"users", - "type": "edit-user", "operation-id": 99}) + { + "username": "jdoe", + "password": "password", + "name": "John Doe", + "location": "Everywhere", + "work-number": "789WORK", + "home-number": "123HOME", + "primary-group-name": "users", + "type": "edit-user", + "operation-id": 99, + }, + ) result.addCallback(handle_callback) return result @@ -393,22 +567,37 @@ messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) # Ignore the message created by plugin.run. - self.assertMessages([messages[2], messages[1]], - [{"timestamp": 0, "delete-users": ["jdoe"], - "type": "users", "operation-id": 39}, - {"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 39, "timestamp": 0, - "result-text": "remove_user succeeded"}]) - - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", - "/bin/zsh")] + self.assertMessages( + [messages[2], messages[1]], + [ + { + "timestamp": 0, + "delete-users": ["jdoe"], + "type": "users", + "operation-id": 39, + }, + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 39, + "timestamp": 0, + "result-text": "remove_user succeeded", + }, + ], + ) + + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh"), + ] self.setup_environment(users, [], None) result = self.manager.dispatch_message( - {"username": "jdoe", - "delete-home": True, - "type": "remove-user", - "operation-id": 39}) + { + "username": "jdoe", + "delete-home": True, + "type": "remove-user", + "operation-id": 39, + }, + ) result.addCallback(handle_callback) return result @@ -417,34 +606,60 @@ The L{UserManager} can handle multiple remove-user events at the same time. """ - users = [("foo", "x", 1000, 1000, "Foo,,,,", "/home/foo", "/bin/zsh"), - ("bar", "x", 1001, 1001, "Bar,,,,", "/home/bar", "/bin/zsh")] + users = [ + ("foo", "x", 1000, 1000, "Foo,,,,", "/home/foo", "/bin/zsh"), + ("bar", "x", 1001, 1001, "Bar,,,,", "/home/bar", "/bin/zsh"), + ] self.setup_environment(users, [], None) def handle_callback(ignored): messages = self.broker_service.message_store.get_pending_messages() # Ignore the message created by plugin.run. 
- messages = sorted([messages[1], messages[3]], - key=lambda message: message["operation-id"]) - self.assertMessages(messages, - [{"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 39, "timestamp": 0, - "result-text": "remove_user succeeded"}, - {"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 40, "timestamp": 0, - "result-text": "remove_user succeeded"}]) + messages = sorted( + [messages[1], messages[3]], + key=lambda message: message["operation-id"], + ) + self.assertMessages( + messages, + [ + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 39, + "timestamp": 0, + "result-text": "remove_user succeeded", + }, + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 40, + "timestamp": 0, + "result-text": "remove_user succeeded", + }, + ], + ) results = [] - results.append(self.manager.dispatch_message({"username": "foo", - "delete-home": True, - "type": "remove-user", - "operation-id": 39})) - results.append(self.manager.dispatch_message({"username": "bar", - "delete-home": True, - "type": "remove-user", - "operation-id": 40})) + results.append( + self.manager.dispatch_message( + { + "username": "foo", + "delete-home": True, + "type": "remove-user", + "operation-id": 39, + }, + ), + ) + results.append( + self.manager.dispatch_message( + { + "username": "bar", + "delete-home": True, + "type": "remove-user", + "operation-id": 40, + }, + ), + ) return gather_results(results).addCallback(handle_callback) def test_failing_remove_user_event(self): @@ -458,17 +673,28 @@ messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 1) failure_string = "UserManagementError: remove_user failed" - self.assertMessages(messages, - [{"type": "operation-result", "status": FAILED, - "operation-id": 39, "timestamp": 0, - "result-text": failure_string}]) + self.assertMessages( + messages, + [ + { + "type": "operation-result", + "status": FAILED, + "operation-id": 39, + "timestamp": 0, + "result-text": failure_string, + }, + ], + ) self.setup_environment([], [], None) result = self.manager.dispatch_message( - {"username": "jdoe", - "delete-home": True, - "type": "remove-user", - "operation-id": 39}) + { + "username": "jdoe", + "delete-home": True, + "type": "remove-user", + "operation-id": 39, + }, + ) result.addCallback(handle_callback) return result @@ -483,26 +709,40 @@ """ def handle_callback(result): - messages = ( - self.broker_service.message_store.get_pending_messages()) + messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) # Ignore the message created by plugin.run. 
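test_failing_remove_user_event above pins down the failure contract: when the management call raises, exactly one operation-result message is queued, with status FAILED and the exception rendered as "ExceptionType: message" in result-text. A sketch of that folding, assuming the real plugin helper also stamps timestamps and queues through the broker (names and status values here are illustrative, not the actual API):

    def run_operation(operation_id, function):
        """Fold an operation's outcome into an operation-result dict."""
        try:
            function()
        except Exception as e:
            status = "FAILED"
            # Matches e.g. "UserManagementError: remove_user failed".
            text = f"{type(e).__name__}: {e}"
        else:
            status = "SUCCEEDED"
            text = f"{function.__name__} succeeded"
        return {
            "type": "operation-result",
            "operation-id": operation_id,
            "status": status,
            "result-text": text,
        }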
- self.assertMessages([messages[2], messages[1]], - [{"timestamp": 0, "delete-users": ["jdoe"], - "type": "users", "operation-id": 39}, - {"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 39, "timestamp": 0, - "result-text": "remove_user succeeded"}]) - - users = [("jdoe", "x", 1000, 1000, - "John Doe,,,,", "/home/bo", "/bin/zsh")] + self.assertMessages( + [messages[2], messages[1]], + [ + { + "timestamp": 0, + "delete-users": ["jdoe"], + "type": "users", + "operation-id": 39, + }, + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 39, + "timestamp": 0, + "result-text": "remove_user succeeded", + }, + ], + ) + + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh"), + ] self.setup_environment(users, [], None) result = self.manager.dispatch_message( - {"username": "jdoe", - "delete-home": False, - "type": "remove-user", - "operation-id": 39}) + { + "username": "jdoe", + "delete-home": False, + "type": "remove-user", + "operation-id": 39, + }, + ) result.addCallback(handle_callback) return result @@ -526,14 +766,18 @@ new_messages = message_store.get_pending_messages() self.assertEqual(messages, new_messages) - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", - "/bin/zsh")] + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh"), + ] plugin = self.setup_environment(users, [], self.shadow_file) result = self.manager.dispatch_message( - {"username": "jdoe", - "delete-home": True, - "type": "remove-user", - "operation-id": 39}) + { + "username": "jdoe", + "delete-home": True, + "type": "remove-user", + "operation-id": 39, + }, + ) result.addCallback(handle_callback1) return result @@ -549,28 +793,44 @@ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) - self.assertMessages([messages[0], messages[2]], - [{"type": "users", - "create-users": [{"home-phone": None, - "username": "jdoe", - "uid": 1000, - "enabled": True, - "location": None, - "work-phone": None, - "primary-gid": 1000, - "name": "John Doe"}]}, - {"type": "users", - "delete-users": ["jdoe"], - "operation-id": 39}]) - - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", - "/bin/zsh")] + self.assertMessages( + [messages[0], messages[2]], + [ + { + "type": "users", + "create-users": [ + { + "home-phone": None, + "username": "jdoe", + "uid": 1000, + "enabled": True, + "location": None, + "work-phone": None, + "primary-gid": 1000, + "name": "John Doe", + }, + ], + }, + { + "type": "users", + "delete-users": ["jdoe"], + "operation-id": 39, + }, + ], + ) + + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh"), + ] self.setup_environment(users, [], None) result = self.manager.dispatch_message( - {"username": "jdoe", - "delete-home": True, - "type": "remove-user", - "operation-id": 39}) + { + "username": "jdoe", + "delete-home": True, + "type": "remove-user", + "operation-id": 39, + }, + ) result.addCallback(handle_callback) return result @@ -587,29 +847,43 @@ messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3, messages) # Ignore the message created by plugin.run. 
- self.assertMessages([messages[2], messages[1]], - [{"timestamp": 0, "type": "users", - "operation-id": 99, - "update-users": [{"home-phone": None, - "username": "jdoe", - "uid": 1000, - "enabled": False, - "location": None, - "work-phone": None, - "primary-gid": 1000, - "name": u"John Doe"}]}, - {"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 99, "timestamp": 0, - "result-text": "lock_user succeeded"}]) - - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", - "/bin/zsh")] + self.assertMessages( + [messages[2], messages[1]], + [ + { + "timestamp": 0, + "type": "users", + "operation-id": 99, + "update-users": [ + { + "home-phone": None, + "username": "jdoe", + "uid": 1000, + "enabled": False, + "location": None, + "work-phone": None, + "primary-gid": 1000, + "name": "John Doe", + }, + ], + }, + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 99, + "timestamp": 0, + "result-text": "lock_user succeeded", + }, + ], + ) + + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh"), + ] self.setup_environment(users, [], self.shadow_file) result = self.manager.dispatch_message( - {"username": "jdoe", - "operation-id": 99, - "type": "lock-user"}) + {"username": "jdoe", "operation-id": 99, "type": "lock-user"}, + ) result.addCallback(handle_callback) return result @@ -625,17 +899,23 @@ messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 1) failure_string = "UserManagementError: lock_user failed" - self.assertMessages(messages, - [{"type": "operation-result", - "status": FAILED, - "operation-id": 99, "timestamp": 0, - "result-text": failure_string}]) + self.assertMessages( + messages, + [ + { + "type": "operation-result", + "status": FAILED, + "operation-id": 99, + "timestamp": 0, + "result-text": failure_string, + }, + ], + ) self.setup_environment([], [], None) result = self.manager.dispatch_message( - {"username": "jdoe", - "operation-id": 99, - "type": "lock-user"}) + {"username": "jdoe", "operation-id": 99, "type": "lock-user"}, + ) result.addCallback(handle_callback) return result @@ -659,13 +939,13 @@ new_messages = message_store.get_pending_messages() self.assertEqual(messages, new_messages) - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", - "/bin/zsh")] + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh"), + ] plugin = self.setup_environment(users, [], self.shadow_file) result = self.manager.dispatch_message( - {"username": "jdoe", - "type": "lock-user", - "operation-id": 99}) + {"username": "jdoe", "type": "lock-user", "operation-id": 99}, + ) result.addCallback(handle_callback1) return result @@ -676,36 +956,54 @@ should first detect changes and then perform the operation. The results should be reported in separate messages. 
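That sentence states the invariant behind every three-message assertion in this class: plugin.run() snapshots the current users first, the management operation runs, and a second detection pass reports only the delta, tagged with the operation-id. The delta boils down to a dict diff along these lines (simplified; the real monitor also diffs groups and group memberships):

    def compute_user_changes(old, new):
        """Diff two {username: details} snapshots into a users message body."""
        changes = {
            "create-users": [new[u] for u in new if u not in old],
            "update-users": [
                new[u] for u in new if u in old and new[u] != old[u]
            ],
            "delete-users": [u for u in old if u not in new],
        }
        # Keys with nothing to report are left out of the actual message.
        return {key: value for key, value in changes.items() if value}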
""" + def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) - self.assertMessages([messages[0], messages[2]], - [{"type": "users", - "create-users": [{"home-phone": None, - "username": "jdoe", - "uid": 1000, - "enabled": True, - "location": None, - "work-phone": None, - "primary-gid": 1000, - "name": "John Doe"}]}, - {"type": "users", "operation-id": 99, - "update-users": [{"home-phone": None, - "username": "jdoe", - "uid": 1000, - "enabled": False, - "location": None, - "work-phone": None, - "primary-gid": 1000, - "name": "John Doe"}]}]) - - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", - "/bin/zsh")] + self.assertMessages( + [messages[0], messages[2]], + [ + { + "type": "users", + "create-users": [ + { + "home-phone": None, + "username": "jdoe", + "uid": 1000, + "enabled": True, + "location": None, + "work-phone": None, + "primary-gid": 1000, + "name": "John Doe", + }, + ], + }, + { + "type": "users", + "operation-id": 99, + "update-users": [ + { + "home-phone": None, + "username": "jdoe", + "uid": 1000, + "enabled": False, + "location": None, + "work-phone": None, + "primary-gid": 1000, + "name": "John Doe", + }, + ], + }, + ], + ) + + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh"), + ] self.setup_environment(users, [], self.shadow_file) result = self.manager.dispatch_message( - {"username": "jdoe", - "type": "lock-user", - "operation-id": 99}) + {"username": "jdoe", "type": "lock-user", "operation-id": 99}, + ) result.addCallback(handle_callback) return result @@ -721,30 +1019,52 @@ messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) # Ignore the message created by plugin.run. 
- self.assertMessages([messages[2], messages[1]], - [{"timestamp": 0, "type": "users", - "operation-id": 99, - "update-users": [{"home-phone": None, - "username": "psmith", - "uid": 1000, - "enabled": True, - "location": None, - "work-phone": None, - "primary-gid": 1000, - "name": u"Paul Smith"}]}, - {"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 99, "timestamp": 0, - "result-text": "unlock_user succeeded"}]) - - users = [("psmith", "x", 1000, 1000, "Paul Smith,,,,", "/home/psmith", - "/bin/zsh")] + self.assertMessages( + [messages[2], messages[1]], + [ + { + "timestamp": 0, + "type": "users", + "operation-id": 99, + "update-users": [ + { + "home-phone": None, + "username": "psmith", + "uid": 1000, + "enabled": True, + "location": None, + "work-phone": None, + "primary-gid": 1000, + "name": "Paul Smith", + }, + ], + }, + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 99, + "timestamp": 0, + "result-text": "unlock_user succeeded", + }, + ], + ) + + users = [ + ( + "psmith", + "x", + 1000, + 1000, + "Paul Smith,,,,", + "/home/psmith", + "/bin/zsh", + ), + ] self.setup_environment(users, [], self.shadow_file) result = self.manager.dispatch_message( - {"username": "psmith", - "type": "unlock-user", - "operation-id": 99}) + {"username": "psmith", "type": "unlock-user", "operation-id": 99}, + ) result.addCallback(handle_callback) return result @@ -760,17 +1080,23 @@ messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 1) failure_string = "UserManagementError: unlock_user failed" - self.assertMessages(messages, - [{"type": "operation-result", - "status": FAILED, - "operation-id": 99, "timestamp": 0, - "result-text": failure_string}]) + self.assertMessages( + messages, + [ + { + "type": "operation-result", + "status": FAILED, + "operation-id": 99, + "timestamp": 0, + "result-text": failure_string, + }, + ], + ) self.setup_environment([], [], None) result = self.manager.dispatch_message( - {"username": "jdoe", - "operation-id": 99, - "type": "unlock-user"}) + {"username": "jdoe", "operation-id": 99, "type": "unlock-user"}, + ) result.addCallback(handle_callback) return result @@ -795,14 +1121,22 @@ new_messages = message_store.get_pending_messages() self.assertEqual(messages, new_messages) - users = [("psmith", "x", 1000, 1000, "Paul Smith,,,,", "/home/psmith", - "/bin/zsh")] + users = [ + ( + "psmith", + "x", + 1000, + 1000, + "Paul Smith,,,,", + "/home/psmith", + "/bin/zsh", + ), + ] plugin = self.setup_environment(users, [], self.shadow_file) result = self.manager.dispatch_message( - {"username": "psmith", - "operation-id": 99, - "type": "unlock-user"}) + {"username": "psmith", "operation-id": 99, "type": "unlock-user"}, + ) result.addCallback(handle_callback) return result @@ -818,40 +1152,64 @@ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) - self.assertMessages([messages[0], messages[2]], - [{"type": "users", - "create-users": [{"home-phone": None, - "username": "psmith", - "uid": 1000, - "enabled": False, - "location": None, - "work-phone": None, - "primary-gid": 1000, - "name": "Paul Smith"}]}, - {"type": "users", "operation-id": 99, - "update-users": [{"home-phone": None, - "username": "psmith", - "uid": 1000, - "enabled": True, - "location": None, - "work-phone": None, - "primary-gid": 1000, - "name": "Paul Smith"}]}]) - - users = [("psmith", "x", 1000, 1000, "Paul Smith,,,,", "/home/psmith", - "/bin/zsh")] + 
self.assertMessages( + [messages[0], messages[2]], + [ + { + "type": "users", + "create-users": [ + { + "home-phone": None, + "username": "psmith", + "uid": 1000, + "enabled": False, + "location": None, + "work-phone": None, + "primary-gid": 1000, + "name": "Paul Smith", + }, + ], + }, + { + "type": "users", + "operation-id": 99, + "update-users": [ + { + "home-phone": None, + "username": "psmith", + "uid": 1000, + "enabled": True, + "location": None, + "work-phone": None, + "primary-gid": 1000, + "name": "Paul Smith", + }, + ], + }, + ], + ) + + users = [ + ( + "psmith", + "x", + 1000, + 1000, + "Paul Smith,,,,", + "/home/psmith", + "/bin/zsh", + ), + ] self.setup_environment(users, [], self.shadow_file) result = self.manager.dispatch_message( - {"username": "psmith", - "operation-id": 99, - "type": "unlock-user"}) + {"username": "psmith", "operation-id": 99, "type": "unlock-user"}, + ) result.addCallback(handle_callback) return result class GroupOperationsMessagingTest(UserGroupTestBase): - def test_add_group_event(self): """ When an C{add-group} message is received the group should be @@ -864,21 +1222,29 @@ messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 2) # Ignore the message created by plugin.run. - self.assertMessages([messages[1], messages[0]], - [{"type": "users", "timestamp": 0, - "operation-id": 123, - "create-groups": [{"gid": 1000, - "name": "bizdev"}]}, - {"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 123, "timestamp": 0, - "result-text": "add_group succeeded"}]) + self.assertMessages( + [messages[1], messages[0]], + [ + { + "type": "users", + "timestamp": 0, + "operation-id": 123, + "create-groups": [{"gid": 1000, "name": "bizdev"}], + }, + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 123, + "timestamp": 0, + "result-text": "add_group succeeded", + }, + ], + ) self.setup_environment([], [], None) result = self.manager.dispatch_message( - {"groupname": "bizdev", - "type": "add-group", - "operation-id": 123}) + {"groupname": "bizdev", "type": "add-group", "operation-id": 123}, + ) result.addCallback(handle_callback) return result @@ -904,9 +1270,8 @@ plugin = self.setup_environment([], [], None) result = self.manager.dispatch_message( - {"groupname": "bizdev", - "operation-id": 123, - "type": "add-group"}) + {"groupname": "bizdev", "operation-id": 123, "type": "add-group"}, + ) result.addCallback(handle_callback1) return result @@ -922,20 +1287,26 @@ messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) # We skip the operation-result message. 
- self.assertMessages([messages[0], messages[2]], - [{"type": "users", - "create-groups": [{"gid": 1001, - "name": "sales"}]}, - {"type": "users", "operation-id": 123, - "create-groups": [{"gid": 1002, - "name": "bizdev"}]}]) + self.assertMessages( + [messages[0], messages[2]], + [ + { + "type": "users", + "create-groups": [{"gid": 1001, "name": "sales"}], + }, + { + "type": "users", + "operation-id": 123, + "create-groups": [{"gid": 1002, "name": "bizdev"}], + }, + ], + ) groups = [("sales", "x", 1001, [])] self.setup_environment([], groups, None) result = self.manager.dispatch_message( - {"groupname": "bizdev", - "type": "add-group", - "operation-id": 123}) + {"groupname": "bizdev", "type": "add-group", "operation-id": 123}, + ) result.addCallback(handle_callback) return result @@ -954,30 +1325,39 @@ self.assertEqual(len(messages), 3) # Ignore the message created when the initial snapshot was # taken before the operation was performed. - expected = [{"create-groups": [{"gid": 50, - "name": "sales"}], - "timestamp": 0, - "type": "users"}, - {"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 123, "timestamp": 0, - "result-text": "set_group_details succeeded"}, - {"delete-groups": ["sales"], - "create-groups": [{"gid": 50, - "name": "bizdev"}], - "timestamp": 0, - "operation-id": 123, - "type": "users"}, - ] + expected = [ + { + "create-groups": [{"gid": 50, "name": "sales"}], + "timestamp": 0, + "type": "users", + }, + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 123, + "timestamp": 0, + "result-text": "set_group_details succeeded", + }, + { + "delete-groups": ["sales"], + "create-groups": [{"gid": 50, "name": "bizdev"}], + "timestamp": 0, + "operation-id": 123, + "type": "users", + }, + ] self.assertMessages(messages, expected) groups = [("sales", "x", 50, [])] self.setup_environment([], groups, None) result = self.manager.dispatch_message( - {"groupname": "sales", - "new-name": "bizdev", - "type": "edit-group", - "operation-id": 123}) + { + "groupname": "sales", + "new-name": "bizdev", + "type": "edit-group", + "operation-id": 123, + }, + ) result.addCallback(handle_callback) return result @@ -1004,11 +1384,14 @@ groups = [("sales", "x", 50, [])] plugin = self.setup_environment([], groups, None) result = self.manager.dispatch_message( - {"gid": 50, - "groupname": "sales", - "new-name": "bizdev", - "operation-id": 123, - "type": "edit-group"}) + { + "gid": 50, + "groupname": "sales", + "new-name": "bizdev", + "operation-id": 123, + "type": "edit-group", + }, + ) result.addCallback(handle_callback1) return result @@ -1023,8 +1406,13 @@ def handle_callback1(result): result = self.manager.dispatch_message( - {"groupname": "sales", "new-name": "webdev", - "operation-id": 123, "type": "edit-group"}) + { + "groupname": "sales", + "new-name": "webdev", + "operation-id": 123, + "type": "edit-group", + }, + ) result.addCallback(handle_callback2) return result @@ -1033,15 +1421,21 @@ message_store = self.broker_service.message_store messages = message_store.get_pending_messages() self.assertEqual(len(messages), 3) - self.assertMessages([messages[0], messages[2]], - [{"type": "users", - "create-groups": [{"gid": 1001, - "name": "sales"}]}, - {"type": "users", - "operation-id": 123, - "delete-groups": ["sales"], - "create-groups": [{"gid": 1001, - "name": "webdev"}]}]) + self.assertMessages( + [messages[0], messages[2]], + [ + { + "type": "users", + "create-groups": [{"gid": 1001, "name": "sales"}], + }, + { + "type": "users", + "operation-id": 123, + 
"delete-groups": ["sales"], + "create-groups": [{"gid": 1001, "name": "webdev"}], + }, + ], + ) groups = [("sales", "x", 1001, [])] plugin = self.setup_environment([], groups, None) @@ -1063,25 +1457,36 @@ self.assertEqual(len(messages), 3) # Ignore the message created when the initial snapshot was # taken before the operation was performed. - expected = [{"type": "users", "timestamp": 0, - "operation-id": 123, - "create-group-members": {"bizdev": ["jdoe"]}}, - {"type": "operation-result", - "timestamp": 0, - "status": SUCCEEDED, - "operation-id": 123, - "result-text": "add_group_member succeeded"}] + expected = [ + { + "type": "users", + "timestamp": 0, + "operation-id": 123, + "create-group-members": {"bizdev": ["jdoe"]}, + }, + { + "type": "operation-result", + "timestamp": 0, + "status": SUCCEEDED, + "operation-id": 123, + "result-text": "add_group_member succeeded", + }, + ] self.assertMessages([messages[2], messages[1]], expected) - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", - "/home/jdoe")] + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", "/home/jdoe"), + ] groups = [("bizdev", "x", 1001, [])] self.setup_environment(users, groups, None) result = self.manager.dispatch_message( - {"username": "jdoe", - "groupname": "bizdev", - "operation-id": 123, - "type": "add-group-member"}) + { + "username": "jdoe", + "groupname": "bizdev", + "operation-id": 123, + "type": "add-group-member", + }, + ) result.addCallback(handle_callback) return result @@ -1100,23 +1505,36 @@ self.assertEqual(len(messages), 3) # Ignore the message created when the initial snapshot was # taken before the operation was performed. - expected = [{"type": "users", "timestamp": 0, - "operation-id": 123, - "create-group-members": {"bizdev": ["jdoe"]}}, - {"type": "operation-result", "timestamp": 0, - "status": SUCCEEDED, "operation-id": 123, - "result-text": "add_group_member succeeded"}] + expected = [ + { + "type": "users", + "timestamp": 0, + "operation-id": 123, + "create-group-members": {"bizdev": ["jdoe"]}, + }, + { + "type": "operation-result", + "timestamp": 0, + "status": SUCCEEDED, + "operation-id": 123, + "result-text": "add_group_member succeeded", + }, + ] self.assertMessages([messages[2], messages[1]], expected) - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", - "/home/jdoe")] + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", "/home/jdoe"), + ] groups = [("bizdev", "x", 1001, [])] self.setup_environment(users, groups, None) result = self.manager.dispatch_message( - {"username": "jdoe", - "groupname": "bizdev", - "type": "add-group-member", - "operation-id": 123}) + { + "username": "jdoe", + "groupname": "bizdev", + "type": "add-group-member", + "operation-id": 123, + }, + ) result.addCallback(handle_callback) return result @@ -1141,15 +1559,19 @@ new_messages = message_store.get_pending_messages() self.assertEqual(messages, new_messages) - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", - "/home/jdoe")] + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", "/home/jdoe"), + ] groups = [("bizdev", "x", 1001, ["jdoe"])] plugin = self.setup_environment(users, groups, None) result = self.manager.dispatch_message( - {"username": u"jdoe", - "groupname": u"bizdev", - "type": "add-group-member", - "operation-id": 123}) + { + "username": "jdoe", + "groupname": "bizdev", + "type": "add-group-member", + "operation-id": 123, + }, + ) result.addCallback(handle_callback) return result @@ -1165,30 +1587,44 @@ def 
handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) - expected = [{"type": "users", - "create-users": [{"home-phone": None, - "username": "jdoe", - "uid": 1000, - "enabled": True, - "location": None, - "work-phone": None, - "primary-gid": 1000, - "name": "John Doe"}], - "create-groups": [{"gid": 1001, - "name": "bizdev"}]}, - {"type": "users", "operation-id": 123, - "create-group-members": {"bizdev": ["jdoe"]}}] + expected = [ + { + "type": "users", + "create-users": [ + { + "home-phone": None, + "username": "jdoe", + "uid": 1000, + "enabled": True, + "location": None, + "work-phone": None, + "primary-gid": 1000, + "name": "John Doe", + }, + ], + "create-groups": [{"gid": 1001, "name": "bizdev"}], + }, + { + "type": "users", + "operation-id": 123, + "create-group-members": {"bizdev": ["jdoe"]}, + }, + ] self.assertMessages([messages[0], messages[2]], expected) - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", - "/home/jdoe")] + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", "/home/jdoe"), + ] groups = [("bizdev", "x", 1001, [])] self.setup_environment(users, groups, None) result = self.manager.dispatch_message( - {"username": "jdoe", - "groupname": "bizdev", - "type": "add-group-member", - "operation-id": 123}) + { + "username": "jdoe", + "groupname": "bizdev", + "type": "add-group-member", + "operation-id": 123, + }, + ) result.addCallback(handle_callback) return result @@ -1207,21 +1643,36 @@ # Ignore the message created by plugin.run. self.assertMessages( [messages[2], messages[1]], - [{"type": "users", "timestamp": 0, - "operation-id": 123, - "delete-group-members": {"bizdev": ["jdoe"]}}, - {"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 123, "timestamp": 0, - "result-text": "remove_group_member succeeded"}]) - - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", - "/home/jdoe")] + [ + { + "type": "users", + "timestamp": 0, + "operation-id": 123, + "delete-group-members": {"bizdev": ["jdoe"]}, + }, + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 123, + "timestamp": 0, + "result-text": "remove_group_member succeeded", + }, + ], + ) + + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", "/home/jdoe"), + ] groups = [("bizdev", "x", 1001, ["jdoe"])] self.setup_environment(users, groups, None) result = self.manager.dispatch_message( - {"username": "jdoe", "groupname": "bizdev", - "type": "remove-group-member", "operation-id": 123}) + { + "username": "jdoe", + "groupname": "bizdev", + "type": "remove-group-member", + "operation-id": 123, + }, + ) result.addCallback(handle_callback) return result @@ -1245,14 +1696,19 @@ new_messages = message_store.get_pending_messages() self.assertEqual(messages, new_messages) - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", - "/home/jdoe")] + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", "/home/jdoe"), + ] groups = [("bizdev", "x", 1001, ["jdoe"])] plugin = self.setup_environment(users, groups, None) result = self.manager.dispatch_message( - {"username": "jdoe", "groupname": "bizdev", - "type": "remove-group-member", - "operation-id": 123}) + { + "username": "jdoe", + "groupname": "bizdev", + "type": "remove-group-member", + "operation-id": 123, + }, + ) result.addCallback(handle_callback1) return result @@ -1268,31 +1724,46 @@ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() 
self.assertEqual(len(messages), 3) - expected = [{"timestamp": 0, "type": "users", - "create-users": [{"home-phone": None, - "username": "jdoe", - "uid": 1000, - "enabled": True, - "location": None, - "work-phone": None, - "primary-gid": 1000, - "name": "John Doe"}], - "create-groups": [{"gid": 1001, - "name": "bizdev"}], - "create-group-members": {"bizdev": ["jdoe"]}}, - {"type": "users", "operation-id": 123, - "delete-group-members": {"bizdev": ["jdoe"]}}] + expected = [ + { + "timestamp": 0, + "type": "users", + "create-users": [ + { + "home-phone": None, + "username": "jdoe", + "uid": 1000, + "enabled": True, + "location": None, + "work-phone": None, + "primary-gid": 1000, + "name": "John Doe", + }, + ], + "create-groups": [{"gid": 1001, "name": "bizdev"}], + "create-group-members": {"bizdev": ["jdoe"]}, + }, + { + "type": "users", + "operation-id": 123, + "delete-group-members": {"bizdev": ["jdoe"]}, + }, + ] self.assertMessages([messages[0], messages[2]], expected) - users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", - "/home/jdoe")] + users = [ + ("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", "/home/jdoe"), + ] groups = [("bizdev", "x", 1001, ["jdoe"])] self.setup_environment(users, groups, None) result = self.manager.dispatch_message( - {"groupname": "bizdev", - "username": "jdoe", - "type": "remove-group-member", - "operation-id": 123}) + { + "groupname": "bizdev", + "username": "jdoe", + "type": "remove-group-member", + "operation-id": 123, + }, + ) result.addCallback(handle_callback) return result @@ -1307,8 +1778,12 @@ def handle_callback1(result): result = self.manager.dispatch_message( - {"groupname": "sales", "type": "remove-group", - "operation-id": 123}) + { + "groupname": "sales", + "type": "remove-group", + "operation-id": 123, + }, + ) result.addCallback(handle_callback2) return result @@ -1319,14 +1794,24 @@ self.assertEqual(len(messages), 3) # Ignore the message created when the initial snapshot was # taken before the operation was performed. 
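Across this module the pending-message indices are stable, which is why the assertions keep reordering [messages[2], messages[1]] instead of asserting the raw queue:

    # Queue layout after a successful dispatch, when the initial snapshot
    # is non-empty (indices into message_store.get_pending_messages()):
    #   messages[0]  "users" snapshot emitted by plugin.run()
    #   messages[1]  "operation-result" for the dispatched operation
    #   messages[2]  "users" delta carrying the operation-id
    # With an empty initial snapshot, messages[0] is the operation-result.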
- self.assertMessages([messages[2], messages[1]], - [{"type": "users", "timestamp": 0, - "operation-id": 123, - "delete-groups": ["sales"]}, - {"type": "operation-result", - "status": SUCCEEDED, - "operation-id": 123, "timestamp": 0, - "result-text": "remove_group succeeded"}]) + self.assertMessages( + [messages[2], messages[1]], + [ + { + "type": "users", + "timestamp": 0, + "operation-id": 123, + "delete-groups": ["sales"], + }, + { + "type": "operation-result", + "status": SUCCEEDED, + "operation-id": 123, + "timestamp": 0, + "result-text": "remove_group succeeded", + }, + ], + ) groups = [("sales", "x", 1001, ["jdoe"])] plugin = self.setup_environment([], groups, None) @@ -1358,9 +1843,12 @@ groups = [("sales", "x", 50, [])] plugin = self.setup_environment([], groups, None) result = self.manager.dispatch_message( - {"groupname": "sales", - "operation-id": 123, - "type": "remove-group"}) + { + "groupname": "sales", + "operation-id": 123, + "type": "remove-group", + }, + ) result.addCallback(handle_callback1) return result @@ -1375,8 +1863,12 @@ def handle_callback1(result): result = self.manager.dispatch_message( - {"groupname": "sales", "operation-id": 123, - "type": "remove-group"}) + { + "groupname": "sales", + "operation-id": 123, + "type": "remove-group", + }, + ) result.addCallback(handle_callback2) return result @@ -1384,13 +1876,20 @@ message_store = self.broker_service.message_store messages = message_store.get_pending_messages() self.assertEqual(len(messages), 3) - self.assertMessages([messages[0], messages[2]], - [{"type": "users", - "create-groups": [{"gid": 1001, - "name": "sales"}]}, - {"type": "users", - "delete-groups": ["sales"], - "operation-id": 123}]) + self.assertMessages( + [messages[0], messages[2]], + [ + { + "type": "users", + "create-groups": [{"gid": 1001, "name": "sales"}], + }, + { + "type": "users", + "delete-groups": ["sales"], + "operation-id": 123, + }, + ], + ) groups = [("sales", "x", 1001, [])] plugin = self.setup_environment([], groups, None) @@ -1400,9 +1899,8 @@ class UserManagerTest(LandscapeTest): - def setUp(self): - super(UserManagerTest, self).setUp() + super().setUp() self.shadow_file = self.makeFile() self.user_manager = UserManager(shadow_file=self.shadow_file) @@ -1412,9 +1910,11 @@ of locked users. """ fd = open(self.shadow_file, "w") - fd.write("jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7:::\n" - "psmith:!:13348:0:99999:7:::\n" - "yo:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7:::\n") + fd.write( + "jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7:::\n" + "psmith:!:13348:0:99999:7:::\n" + "yo:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7:::\n", + ) fd.close() self.assertEqual(self.user_manager.get_locked_usernames(), ["psmith"]) @@ -1436,8 +1936,11 @@ self.log_helper.ignore_errors("Error reading shadow file.*") self.assertFalse(os.path.exists(self.shadow_file)) self.assertEqual(self.user_manager.get_locked_usernames(), []) - self.assertIn("Error reading shadow file. [Errno 2] No such file or " - "directory", self.logfile.getvalue()) + self.assertIn( + "Error reading shadow file. 
[Errno 2] No such file or " + "directory", + self.logfile.getvalue(), + ) class RemoteUserManagerTest(LandscapeTest): @@ -1445,15 +1948,17 @@ helpers = [ManagerHelper] def setUp(self): - super(RemoteUserManagerTest, self).setUp() + super().setUp() def set_remote(remote): self.remote_user_manager = remote self.shadow_file = self.makeFile() self.user_manager = UserManager(shadow_file=self.shadow_file) - self.user_manager_connector = RemoteUserManagerConnector(self.reactor, - self.config) + self.user_manager_connector = RemoteUserManagerConnector( + self.reactor, + self.config, + ) self.user_manager.register(self.manager) connected = self.user_manager_connector.connect() return connected.addCallback(set_remote) @@ -1461,7 +1966,7 @@ def tearDown(self): self.user_manager_connector.disconnect() self.user_manager.stop() - return super(RemoteUserManagerTest, self).tearDown() + return super().tearDown() def test_get_locked_usernames(self): """ @@ -1471,5 +1976,5 @@ self.user_manager.get_locked_usernames = Mock(return_value=["fred"]) deferred = self.remote_user_manager.get_locked_usernames() result = self.successResultOf(deferred) - self.assertEqual(result, ['fred']) + self.assertEqual(result, ["fred"]) self.user_manager.get_locked_usernames.assert_called_once_with() diff -Nru landscape-client-23.02/landscape/client/manager/usermanager.py landscape-client-23.08/landscape/client/manager/usermanager.py --- landscape-client-23.02/landscape/client/manager/usermanager.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/manager/usermanager.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,10 +1,11 @@ import logging -from landscape.client.amp import ComponentConnector, ComponentPublisher, remote - -from landscape.client.user.management import UserManagement +from landscape.client.amp import ComponentConnector +from landscape.client.amp import ComponentPublisher +from landscape.client.amp import remote from landscape.client.manager.plugin import ManagerPlugin from landscape.client.monitor.usermonitor import RemoteUserMonitorConnector +from landscape.client.user.management import UserManagement class UserManager(ManagerPlugin): @@ -14,17 +15,18 @@ def __init__(self, management=None, shadow_file="/etc/shadow"): self._management = management or UserManagement() self._shadow_file = shadow_file - self._message_types = {"add-user": self._add_user, - "edit-user": self._edit_user, - "lock-user": self._lock_user, - "unlock-user": self._unlock_user, - "remove-user": self._remove_user, - "add-group": self._add_group, - "edit-group": self._edit_group, - "remove-group": self._remove_group, - "add-group-member": self._add_group_member, - "remove-group-member": - self._remove_group_member} + self._message_types = { + "add-user": self._add_user, + "edit-user": self._edit_user, + "lock-user": self._lock_user, + "unlock-user": self._unlock_user, + "remove-user": self._remove_user, + "add-group": self._add_group, + "edit-group": self._edit_group, + "remove-group": self._remove_group, + "add-group-member": self._add_group_member, + "remove-group-member": self._remove_group_member, + } self._publisher = None def register(self, registry): @@ -32,16 +34,21 @@ Schedule reactor events for generic L{Plugin} callbacks, user and group management operations, and resynchronization. 
""" - super(UserManager, self).register(registry) + super().register(registry) self._registry = registry - self._publisher = ComponentPublisher(self, self.registry.reactor, - self.registry.config) + self._publisher = ComponentPublisher( + self, + self.registry.reactor, + self.registry.config, + ) self._publisher.start() for message_type in self._message_types: - self._registry.register_message(message_type, - self._message_dispatch) + self._registry.register_message( + message_type, + self._message_dispatch, + ) def stop(self): """Stop listening for incoming AMP connections.""" @@ -61,8 +68,8 @@ if len(parts) > 1: if parts[1].startswith("!"): locked_users.append(parts[0].strip()) - except IOError as e: - logging.error("Error reading shadow file. %s" % e) + except OSError as e: + logging.error(f"Error reading shadow file. {e}") return locked_users def _message_dispatch(self, message): @@ -71,7 +78,9 @@ @param message: The request we got from the server. """ user_monitor_connector = RemoteUserMonitorConnector( - self.registry.reactor, self.registry.config) + self.registry.reactor, + self.registry.config, + ) def detect_changes(user_monitor): self._user_monitor = user_monitor @@ -87,30 +96,39 @@ def _perform_operation(self, result, message): message_type = message["type"] message_method = self._message_types[message_type] - return self.call_with_operation_result(message, message_method, - message) + return self.call_with_operation_result( + message, + message_method, + message, + ) def _send_changes(self, result, message): return self._user_monitor.detect_changes(message["operation-id"]) def _add_user(self, message): """Run an C{add-user} operation.""" - return self._management.add_user(message["username"], message["name"], - message["password"], - message["require-password-reset"], - message["primary-group-name"], - message["location"], - message["work-number"], - message["home-number"]) + return self._management.add_user( + message["username"], + message["name"], + message["password"], + message["require-password-reset"], + message["primary-group-name"], + message["location"], + message["work-number"], + message["home-number"], + ) def _edit_user(self, message): """Run an C{edit-user} operation.""" return self._management.set_user_details( - message["username"], password=message["password"], - name=message["name"], location=message["location"], - work_number=message["work-number"], - home_number=message["home-number"], - primary_group_name=message["primary-group-name"]) + message["username"], + password=message["password"], + name=message["name"], + location=message["location"], + work_number=message["work-number"], + home_number=message["home-number"], + primary_group_name=message["primary-group-name"], + ) def _lock_user(self, message): """Run a C{lock-user} operation.""" @@ -122,8 +140,10 @@ def _remove_user(self, message): """Run a C{remove-user} operation.""" - return self._management.remove_user(message["username"], - message["delete-home"]) + return self._management.remove_user( + message["username"], + message["delete-home"], + ) def _add_group(self, message): """Run an C{add-group} operation.""" @@ -131,18 +151,24 @@ def _edit_group(self, message): """Run an C{edit-group} operation.""" - return self._management.set_group_details(message["groupname"], - message["new-name"]) + return self._management.set_group_details( + message["groupname"], + message["new-name"], + ) def _add_group_member(self, message): """Run an C{add-group-member} operation.""" - return 
self._management.add_group_member(message["username"], - message["groupname"]) + return self._management.add_group_member( + message["username"], + message["groupname"], + ) def _remove_group_member(self, message): """Run a C{remove-group-member} operation.""" - return self._management.remove_group_member(message["username"], - message["groupname"]) + return self._management.remove_group_member( + message["username"], + message["groupname"], + ) def _remove_group(self, message): """Run an C{remove-group} operation.""" diff -Nru landscape-client-23.02/landscape/client/monitor/activeprocessinfo.py landscape-client-23.08/landscape/client/monitor/activeprocessinfo.py --- landscape-client-23.02/landscape/client/monitor/activeprocessinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/activeprocessinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -3,9 +3,9 @@ from twisted.python.compat import itervalues from landscape.client.diff import diff -from landscape.lib.process import ProcessInformation -from landscape.lib.jiffies import detect_jiffies from landscape.client.monitor.plugin import DataWatcher +from landscape.lib.jiffies import detect_jiffies +from landscape.lib.process import ProcessInformation class ActiveProcessInfo(DataWatcher): @@ -13,22 +13,30 @@ message_type = "active-process-info" scope = "process" - def __init__(self, proc_dir="/proc", boot_time=None, jiffies=None, - uptime=None, popen=subprocess.Popen): - super(ActiveProcessInfo, self).__init__() + def __init__( + self, + proc_dir="/proc", + boot_time=None, + jiffies=None, + uptime=None, + popen=subprocess.Popen, + ): + super().__init__() self._proc_dir = proc_dir self._persist_processes = {} self._previous_processes = {} self._jiffies_per_sec = jiffies or detect_jiffies() self._popen = popen self._first_run = True - self._process_info = ProcessInformation(proc_dir=proc_dir, - jiffies=jiffies, - boot_time=boot_time, - uptime=uptime) + self._process_info = ProcessInformation( + proc_dir=proc_dir, + jiffies=jiffies, + boot_time=boot_time, + uptime=uptime, + ) def register(self, manager): - super(ActiveProcessInfo, self).register(manager) + super().register(manager) self.call_on_accepted(self.message_type, self.exchange, True) def _reset(self): diff -Nru landscape-client-23.02/landscape/client/monitor/aptpreferences.py landscape-client-23.08/landscape/client/monitor/aptpreferences.py --- landscape-client-23.02/landscape/client/monitor/aptpreferences.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/aptpreferences.py 2023-08-17 21:19:32.000000000 +0000 @@ -2,10 +2,9 @@ from twisted.python.compat import iteritems -from landscape.lib.fs import read_text_file -from landscape.constants import APT_PREFERENCES_SIZE_LIMIT - from landscape.client.monitor.plugin import DataWatcher +from landscape.constants import APT_PREFERENCES_SIZE_LIMIT +from landscape.lib.fs import read_text_file class AptPreferences(DataWatcher): @@ -30,13 +29,17 @@ simply return C{None} """ data = {} - preferences_filename = os.path.join(self._etc_apt_directory, - u"preferences") + preferences_filename = os.path.join( + self._etc_apt_directory, + "preferences", + ) if os.path.exists(preferences_filename): data[preferences_filename] = read_text_file(preferences_filename) - preferences_directory = os.path.join(self._etc_apt_directory, - u"preferences.d") + preferences_directory = os.path.join( + self._etc_apt_directory, + "preferences.d", + ) if os.path.isdir(preferences_directory): for entry 
in os.listdir(preferences_directory): filename = os.path.join(preferences_directory, entry) diff -Nru landscape-client-23.02/landscape/client/monitor/cephusage.py landscape-client-23.08/landscape/client/monitor/cephusage.py --- landscape-client-23.02/landscape/client/monitor/cephusage.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/cephusage.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,17 +1,18 @@ import logging -import time import os +import time from twisted.internet import threads from twisted.python.compat import unicode from landscape.client.accumulate import Accumulator -from landscape.lib.monitor import CoverageMonitor from landscape.client.monitor.plugin import MonitorPlugin +from landscape.lib.monitor import CoverageMonitor try: from rados import Rados + has_rados = hasattr(Rados, "get_cluster_stats") except ImportError: has_rados = False @@ -45,8 +46,12 @@ # Prevent the Plugin base-class from scheduling looping calls. run_interval = None - def __init__(self, interval=30, monitor_interval=60 * 60, - create_time=time.time): + def __init__( + self, + interval=30, + monitor_interval=60 * 60, + create_time=time.time, + ): self.active = True self._has_rados = has_rados self._interval = interval @@ -57,19 +62,26 @@ self._ceph_config = None def register(self, registry): - super(CephUsage, self).register(registry) + super().register(registry) self._ceph_config = os.path.join( - self.registry.config.data_path, "ceph-client", - "ceph.landscape-client.conf") + self.registry.config.data_path, + "ceph-client", + "ceph.landscape-client.conf", + ) self._accumulate = Accumulator(self._persist, self._interval) self._monitor = CoverageMonitor( - self._interval, 0.8, "Ceph usage snapshot", - create_time=self._create_time) + self._interval, + 0.8, + "Ceph usage snapshot", + create_time=self._create_time, + ) self.registry.reactor.call_every(self._interval, self.run) self.registry.reactor.call_every( - self._monitor_interval, self._monitor.log) + self._monitor_interval, + self._monitor.log, + ) self.registry.reactor.call_on("stop", self._monitor.log, priority=2000) self.call_on_accepted("ceph-usage", self.send_message, True) @@ -77,20 +89,28 @@ ceph_points = self._ceph_usage_points ring_id = self._ceph_ring_id self._ceph_usage_points = [] - return {"type": "ceph-usage", - "ring-id": ring_id, - "ceph-usages": [], # For backwards-compatibility - "data-points": ceph_points} + return { + "type": "ceph-usage", + "ring-id": ring_id, + "ceph-usages": [], # For backwards-compatibility + "data-points": ceph_points, + } def send_message(self, urgent=False): message = self.create_message() if message["ring-id"] and message["data-points"]: self.registry.broker.send_message( - message, self._session_id, urgent=urgent) + message, + self._session_id, + urgent=urgent, + ) def exchange(self, urgent=False): self.registry.broker.call_if_accepted( - "ceph-usage", self.send_message, urgent) + "ceph-usage", + self.send_message, + urgent, + ) def run(self): if not self._should_run(): @@ -107,8 +127,10 @@ return False if not self._has_rados: - logging.info("This machine does not appear to be a Ceph machine. " - "Deactivating plugin.") + logging.info( + "This machine does not appear to be a Ceph machine. 
" + "Deactivating plugin.", + ) self.active = False return False @@ -121,8 +143,10 @@ def _perform_rados_call(self): """The actual Rados interaction.""" - with Rados(conffile=self._ceph_config, - rados_id="landscape-client") as cluster: + with Rados( + conffile=self._ceph_config, + rados_id="landscape-client", + ) as cluster: cluster_stats = cluster.get_cluster_stats() if self._ceph_ring_id is None: @@ -137,13 +161,16 @@ Parses the output and stores the usage data in an accumulator. """ names_map = [ - ("total", "kb"), ("avail", "kb_avail"), ("used", "kb_used")] + ("total", "kb"), + ("avail", "kb_avail"), + ("used", "kb_used"), + ] timestamp = int(self._create_time()) step_values = [] for name, key in names_map: value = cluster_stats[key] * 1024 # Report usage in bytes - step_value = self._accumulate(timestamp, value, "usage.%s" % name) + step_value = self._accumulate(timestamp, value, f"usage.{name}") step_values.append(step_value) if not all(step_values): diff -Nru landscape-client-23.02/landscape/client/monitor/computerinfo.py landscape-client-23.08/landscape/client/monitor/computerinfo.py --- landscape-client-23.02/landscape/client/monitor/computerinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/computerinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,13 +1,16 @@ -import os import logging -from twisted.internet.defer import inlineCallbacks, returnValue +import os +from twisted.internet.defer import inlineCallbacks +from twisted.internet.defer import returnValue + +from landscape.client.monitor.plugin import MonitorPlugin +from landscape.lib.cloud import fetch_ec2_meta_data from landscape.lib.fetch import fetch_async from landscape.lib.fs import read_text_file -from landscape.lib.lsb_release import LSB_RELEASE_FILENAME, parse_lsb_release -from landscape.lib.cloud import fetch_ec2_meta_data +from landscape.lib.lsb_release import LSB_RELEASE_FILENAME +from landscape.lib.lsb_release import parse_lsb_release from landscape.lib.network import get_fqdn -from landscape.client.monitor.plugin import MonitorPlugin METADATA_RETRY_MAX = 3 # Number of retries to get EC2 meta-data @@ -22,10 +25,14 @@ persist_name = "computer-info" scope = "computer" - def __init__(self, get_fqdn=get_fqdn, - meminfo_filename="/proc/meminfo", - lsb_release_filename=LSB_RELEASE_FILENAME, - root_path="/", fetch_async=fetch_async): + def __init__( + self, + get_fqdn=get_fqdn, + meminfo_filename="/proc/meminfo", + lsb_release_filename=LSB_RELEASE_FILENAME, + root_path="/", + fetch_async=fetch_async, + ): self._get_fqdn = get_fqdn self._meminfo_filename = meminfo_filename self._lsb_release_filename = lsb_release_filename @@ -35,50 +42,77 @@ self._fetch_async = fetch_async def register(self, registry): - super(ComputerInfo, self).register(registry) + super().register(registry) self._annotations_path = registry.config.annotations_path - self.call_on_accepted("computer-info", - self.send_computer_message, True) - self.call_on_accepted("distribution-info", - self.send_distribution_message, True) - self.call_on_accepted("cloud-instance-metadata", - self.send_cloud_instance_metadata_message, True) + self.call_on_accepted( + "computer-info", + self.send_computer_message, + True, + ) + self.call_on_accepted( + "distribution-info", + self.send_distribution_message, + True, + ) + self.call_on_accepted( + "cloud-instance-metadata", + self.send_cloud_instance_metadata_message, + True, + ) def send_computer_message(self, urgent=False): message = self._create_computer_info_message() if 
message: message["type"] = "computer-info" logging.info("Queueing message with updated computer info.") - self.registry.broker.send_message(message, self._session_id, - urgent=urgent) + self.registry.broker.send_message( + message, + self._session_id, + urgent=urgent, + ) def send_distribution_message(self, urgent=False): message = self._create_distribution_info_message() if message: message["type"] = "distribution-info" logging.info("Queueing message with updated distribution info.") - self.registry.broker.send_message(message, self._session_id, - urgent=urgent) + self.registry.broker.send_message( + message, + self._session_id, + urgent=urgent, + ) @inlineCallbacks def send_cloud_instance_metadata_message(self, urgent=False): message = yield self._create_cloud_instance_metadata_message() if message: message["type"] = "cloud-instance-metadata" - logging.info("Queueing message with updated cloud instance " - "metadata.") - self.registry.broker.send_message(message, self._session_id, - urgent=urgent) + logging.info( + "Queueing message with updated cloud instance metadata.", + ) + self.registry.broker.send_message( + message, + self._session_id, + urgent=urgent, + ) def exchange(self, urgent=False): broker = self.registry.broker - broker.call_if_accepted("computer-info", - self.send_computer_message, urgent) - broker.call_if_accepted("distribution-info", - self.send_distribution_message, urgent) - broker.call_if_accepted("cloud-instance-metadata", - self.send_cloud_instance_metadata_message, - urgent) + broker.call_if_accepted( + "computer-info", + self.send_computer_message, + urgent, + ) + broker.call_if_accepted( + "distribution-info", + self.send_distribution_message, + urgent, + ) + broker.call_if_accepted( + "cloud-instance-metadata", + self.send_cloud_instance_metadata_message, + urgent, + ) def _create_computer_info_message(self): message = {} @@ -90,7 +124,8 @@ if os.path.exists(self._annotations_path): for key in os.listdir(self._annotations_path): annotations[key] = read_text_file( - os.path.join(self._annotations_path, key)) + os.path.join(self._annotations_path, key), + ) if annotations: self._add_if_new(message, "annotations", annotations) @@ -113,7 +148,7 @@ message = {} file = open(self._meminfo_filename) for line in file: - if line != '\n': + if line != "\n": parts = line.split(":") key = parts[0] if key in ["MemTotal", "SwapTotal"]: @@ -132,9 +167,10 @@ def _create_cloud_instance_metadata_message(self): """Fetch cloud metadata and insert it in a message.""" message = None - if (self._cloud_instance_metadata is None and - self._cloud_retries < METADATA_RETRY_MAX - ): + if ( + self._cloud_instance_metadata is None + and self._cloud_retries < METADATA_RETRY_MAX + ): self._cloud_instance_metadata = yield self._fetch_ec2_meta_data() message = self._cloud_instance_metadata @@ -149,8 +185,9 @@ def log_no_meta_data_found(error): self._cloud_retries += 1 if self._cloud_retries >= METADATA_RETRY_MAX: - logging.info("No cloud meta-data available. %s" % - error.getErrorMessage()) + logging.info( + f"No cloud meta-data available. 
{error.getErrorMessage()}", + ) def log_success(result): logging.info("Acquired cloud meta-data.") diff -Nru landscape-client-23.02/landscape/client/monitor/computertags.py landscape-client-23.08/landscape/client/monitor/computertags.py --- landscape-client-23.02/landscape/client/monitor/computertags.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/computertags.py 2023-08-17 21:19:32.000000000 +0000 @@ -16,7 +16,7 @@ run_immediately = True def __init__(self, args=sys.argv): - super(ComputerTags, self).__init__() + super().__init__() self.args = args # Defined to specify args in unit tests def get_data(self): diff -Nru landscape-client-23.02/landscape/client/monitor/computeruptime.py landscape-client-23.08/landscape/client/monitor/computeruptime.py --- landscape-client-23.02/landscape/client/monitor/computeruptime.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/computeruptime.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,7 +1,7 @@ import os.path -from landscape.lib import sysstats from landscape.client.monitor.plugin import MonitorPlugin +from landscape.lib import sysstats class ComputerUptime(MonitorPlugin): @@ -16,7 +16,7 @@ def register(self, registry): """Register this plugin with the specified plugin manager.""" - super(ComputerUptime, self).register(registry) + super().register(registry) registry.reactor.call_on("run", self.run) self.call_on_accepted("computer-uptime", self.run, True) @@ -32,21 +32,30 @@ if self._first_run: filename = self._wtmp_file + ".1" if os.path.isfile(filename): - broker.call_if_accepted("computer-uptime", - self.send_message, - filename, - urgent) + broker.call_if_accepted( + "computer-uptime", + self.send_message, + filename, + urgent, + ) if os.path.isfile(self._wtmp_file): - broker.call_if_accepted("computer-uptime", self.send_message, - self._wtmp_file, urgent) + broker.call_if_accepted( + "computer-uptime", + self.send_message, + self._wtmp_file, + urgent, + ) def send_message(self, filename, urgent=False): message = self._create_message(filename) if "shutdown-times" in message or "startup-times" in message: message["type"] = "computer-uptime" - self.registry.broker.send_message(message, self._session_id, - urgent=urgent) + self.registry.broker.send_message( + message, + self._session_id, + urgent=urgent, + ) def _create_message(self, filename): """Generate a message with new startup and shutdown times.""" @@ -57,9 +66,11 @@ last_startup_time = self._persist.get("last-startup-time", 0) last_shutdown_time = self._persist.get("last-shutdown-time", 0) - times = sysstats.BootTimes(filename, - boots_newer_than=last_startup_time, - shutdowns_newer_than=last_shutdown_time) + times = sysstats.BootTimes( + filename, + boots_newer_than=last_startup_time, + shutdowns_newer_than=last_shutdown_time, + ) startup_times, shutdown_times = times.get_times() diff -Nru landscape-client-23.02/landscape/client/monitor/config.py landscape-client-23.08/landscape/client/monitor/config.py --- landscape-client-23.02/landscape/client/monitor/config.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/config.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,30 @@ from landscape.client.deployment import Configuration -ALL_PLUGINS = ["ActiveProcessInfo", "ComputerInfo", - "LoadAverage", "MemoryInfo", "MountInfo", "ProcessorInfo", - "Temperature", "PackageMonitor", "UserMonitor", - "RebootRequired", "AptPreferences", "NetworkActivity", - "NetworkDevice", 
"UpdateManager", "CPUUsage", "SwiftUsage", - "CephUsage", "ComputerTags", "UbuntuProInfo"] +ALL_PLUGINS = [ + "ActiveProcessInfo", + "ComputerInfo", + "LoadAverage", + "MemoryInfo", + "MountInfo", + "ProcessorInfo", + "Temperature", + "PackageMonitor", + "UserMonitor", + "RebootRequired", + "AptPreferences", + "NetworkActivity", + "NetworkDevice", + "UpdateManager", + "CPUUsage", + "SwiftUsage", + "CephUsage", + "ComputerTags", + "UbuntuProInfo", + "LivePatch", + "UbuntuProRebootRequired", + "SnapMonitor", +] class MonitorConfiguration(Configuration): @@ -17,12 +35,15 @@ Specialize L{Configuration.make_parser}, adding many monitor-specific options. """ - parser = super(MonitorConfiguration, self).make_parser() + parser = super().make_parser() - parser.add_option("--monitor-plugins", metavar="PLUGIN_LIST", - help="Comma-delimited list of monitor plugins to " - "use. ALL means use all plugins.", - default="ALL") + parser.add_option( + "--monitor-plugins", + metavar="PLUGIN_LIST", + help="Comma-delimited list of monitor plugins to " + "use. ALL means use all plugins.", + default="ALL", + ) return parser @property diff -Nru landscape-client-23.02/landscape/client/monitor/cpuusage.py landscape-client-23.08/landscape/client/monitor/cpuusage.py --- landscape-client-23.02/landscape/client/monitor/cpuusage.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/cpuusage.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,9 +1,9 @@ -import time import logging +import time from landscape.client.accumulate import Accumulator -from landscape.lib.monitor import CoverageMonitor from landscape.client.monitor.plugin import MonitorPlugin +from landscape.lib.monitor import CoverageMonitor LAST_MESURE_KEY = "last-cpu-usage-measure" ACCUMULATOR_KEY = "cpu-usage-accumulator" @@ -13,13 +13,18 @@ """ Plugin that captures CPU usage information. """ + persist_name = "cpu-usage" scope = "cpu" # Prevent the Plugin base-class from scheduling looping calls. 
run_interval = None - def __init__(self, interval=30, monitor_interval=60 * 60, - create_time=time.time): + def __init__( + self, + interval=30, + monitor_interval=60 * 60, + create_time=time.time, + ): self._interval = interval self._monitor_interval = monitor_interval self._cpu_usage_points = [] @@ -27,16 +32,21 @@ self._stat_file = "/proc/stat" def register(self, registry): - super(CPUUsage, self).register(registry) + super().register(registry) self._accumulate = Accumulator(self._persist, registry.step_size) self.registry.reactor.call_every(self._interval, self.run) - self._monitor = CoverageMonitor(self._interval, 0.8, - "CPU usage snapshot", - create_time=self._create_time) - self.registry.reactor.call_every(self._monitor_interval, - self._monitor.log) + self._monitor = CoverageMonitor( + self._interval, + 0.8, + "CPU usage snapshot", + create_time=self._create_time, + ) + self.registry.reactor.call_every( + self._monitor_interval, + self._monitor.log, + ) self.registry.reactor.call_on("stop", self._monitor.log, priority=2000) self.call_on_accepted("cpu-usage", self.send_message, True) @@ -48,12 +58,18 @@ def send_message(self, urgent=False): message = self.create_message() if len(message["cpu-usages"]): - self.registry.broker.send_message(message, self._session_id, - urgent=urgent) + self.registry.broker.send_message( + message, + self._session_id, + urgent=urgent, + ) def exchange(self, urgent=False): - self.registry.broker.call_if_accepted("cpu-usage", - self.send_message, urgent) + self.registry.broker.call_if_accepted( + "cpu-usage", + self.send_message, + urgent, + ) def run(self): self._monitor.ping() @@ -62,8 +78,11 @@ step_data = None if new_cpu_usage is not None: - step_data = self._accumulate(new_timestamp, new_cpu_usage, - ACCUMULATOR_KEY) + step_data = self._accumulate( + new_timestamp, + new_cpu_usage, + ACCUMULATOR_KEY, + ) if step_data is not None: self._cpu_usage_points.append(step_data) @@ -77,9 +96,11 @@ # The first line of the file is the CPU information aggregated # across cores. stat = f.readline() - except IOError: - logging.error("Could not open %s for reading, " - "CPU usage cannot be computed.", stat_file) + except OSError: + logging.error( + f"Could not open {stat_file} for reading, " + "CPU usage cannot be computed.", + ) return None # The cpu line is composed of: diff -Nru landscape-client-23.02/landscape/client/monitor/livepatch.py landscape-client-23.08/landscape/client/monitor/livepatch.py --- landscape-client-23.02/landscape/client/monitor/livepatch.py 1970-01-01 00:00:00.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/livepatch.py 2023-08-17 21:19:32.000000000 +0000 @@ -0,0 +1,71 @@ +import json +import subprocess +import yaml + +from landscape.client.monitor.plugin import DataWatcher + + +class LivePatch(DataWatcher): + """ + Plugin that captures and reports Livepatch status information + information. + """ + + message_type = "livepatch" + message_key = message_type + persist_name = message_type + scope = "livepatch" + run_immediately = True + run_interval = 1800 # Every 30 min + + def get_data(self): + json_output = get_livepatch_status('json') + readable_output = get_livepatch_status('humane') + return json.dumps({'humane': readable_output, 'json': json_output}, + sort_keys=True) # Prevent randomness for cache + + +def get_livepatch_status(format_type): + """ + Livepatch returns output formatted either 'json' or 'humane' (human- + readable yaml). 
This function takes the the output and parses it into a + python dictionary and sticks it in "output" along with error and return + code information. + """ + + data = {} + try: + completed_process = subprocess.run( + ["canonical-livepatch", "status", "--format", format_type], + encoding="utf8", stdout=subprocess.PIPE, stderr=subprocess.PIPE) + except FileNotFoundError as exc: + data['return_code'] = -1 + data['error'] = str(exc) + data['output'] = "" + except Exception as exc: + data['return_code'] = -2 + data['error'] = str(exc) + data['output'] = "" + else: + output = completed_process.stdout.strip() + try: + if output: # We don't want to parse an empty string + if format_type == 'json': + output = json.loads(output) + if 'Last-Check' in output: # Remove timestamps for cache + del output['Last-Check'] + if 'Uptime' in output: + del output['Uptime'] + else: + output = yaml.safe_load(output) + if 'last check' in output: + del output['last check'] + data['return_code'] = completed_process.returncode + data['error'] = completed_process.stderr + data['output'] = output + except (yaml.YAMLError, json.decoder.JSONDecodeError) as exc: + data['return_code'] = completed_process.returncode + data['error'] = str(exc) + data['output'] = output + + return data diff -Nru landscape-client-23.02/landscape/client/monitor/loadaverage.py landscape-client-23.08/landscape/client/monitor/loadaverage.py --- landscape-client-23.02/landscape/client/monitor/loadaverage.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/loadaverage.py 2023-08-17 21:19:32.000000000 +0000 @@ -2,8 +2,8 @@ import time from landscape.client.accumulate import Accumulator -from landscape.lib.monitor import CoverageMonitor from landscape.client.monitor.plugin import MonitorPlugin +from landscape.lib.monitor import CoverageMonitor class LoadAverage(MonitorPlugin): @@ -14,8 +14,13 @@ # Prevent the Plugin base-class from scheduling looping calls. 
run_interval = None - def __init__(self, interval=15, monitor_interval=60*60, - create_time=time.time, get_load_average=os.getloadavg): + def __init__( + self, + interval=15, + monitor_interval=60 * 60, + create_time=time.time, + get_load_average=os.getloadavg, + ): self._interval = interval self._monitor_interval = monitor_interval self._create_time = create_time @@ -23,16 +28,21 @@ self._get_load_average = get_load_average def register(self, registry): - super(LoadAverage, self).register(registry) + super().register(registry) self._accumulate = Accumulator(self._persist, registry.step_size) self.registry.reactor.call_every(self._interval, self.run) - self._monitor = CoverageMonitor(self._interval, 0.8, - "load average snapshot", - create_time=self._create_time) - self.registry.reactor.call_every(self._monitor_interval, - self._monitor.log) + self._monitor = CoverageMonitor( + self._interval, + 0.8, + "load average snapshot", + create_time=self._create_time, + ) + self.registry.reactor.call_every( + self._monitor_interval, + self._monitor.log, + ) self.registry.reactor.call_on("stop", self._monitor.log, priority=2000) self.call_on_accepted("load-average", self.send_message, True) @@ -42,20 +52,29 @@ return {"type": "load-average", "load-averages": load_averages} def exchange(self, urgent=False): - self.registry.broker.call_if_accepted("load-average", - self.send_message, urgent) + self.registry.broker.call_if_accepted( + "load-average", + self.send_message, + urgent, + ) def send_message(self, urgent=False): message = self.create_message() if len(message["load-averages"]): - self.registry.broker.send_message(message, self._session_id, - urgent=urgent) + self.registry.broker.send_message( + message, + self._session_id, + urgent=urgent, + ) def run(self): self._monitor.ping() new_timestamp = int(self._create_time()) new_load_average = self._get_load_average()[0] - step_data = self._accumulate(new_timestamp, new_load_average, - "accumulate") + step_data = self._accumulate( + new_timestamp, + new_load_average, + "accumulate", + ) if step_data: self._load_averages.append(step_data) diff -Nru landscape-client-23.02/landscape/client/monitor/memoryinfo.py landscape-client-23.08/landscape/client/monitor/memoryinfo.py --- landscape-client-23.02/landscape/client/monitor/memoryinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/memoryinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,10 +1,9 @@ import time -from landscape.lib.monitor import CoverageMonitor -from landscape.lib.sysstats import MemoryStats - from landscape.client.accumulate import Accumulator from landscape.client.monitor.plugin import MonitorPlugin +from landscape.lib.monitor import CoverageMonitor +from landscape.lib.sysstats import MemoryStats class MemoryInfo(MonitorPlugin): @@ -15,8 +14,13 @@ # Prevent the Plugin base-class from scheduling looping calls. 
run_interval = None - def __init__(self, interval=15, monitor_interval=60 * 60, - source_filename="/proc/meminfo", create_time=time.time): + def __init__( + self, + interval=15, + monitor_interval=60 * 60, + source_filename="/proc/meminfo", + create_time=time.time, + ): self._interval = interval self._monitor_interval = monitor_interval self._source_filename = source_filename @@ -24,14 +28,19 @@ self._create_time = create_time def register(self, registry): - super(MemoryInfo, self).register(registry) + super().register(registry) self._accumulate = Accumulator(self._persist, self.registry.step_size) self.registry.reactor.call_every(self._interval, self.run) - self._monitor = CoverageMonitor(self._interval, 0.8, - "memory/swap snapshot", - create_time=self._create_time) - self.registry.reactor.call_every(self._monitor_interval, - self._monitor.log) + self._monitor = CoverageMonitor( + self._interval, + 0.8, + "memory/swap snapshot", + create_time=self._create_time, + ) + self.registry.reactor.call_every( + self._monitor_interval, + self._monitor.log, + ) self.registry.reactor.call_on("stop", self._monitor.log, priority=2000) self.call_on_accepted("memory-info", self.send_message, True) @@ -44,20 +53,32 @@ message = self.create_message() if len(message["memory-info"]): self.registry.broker.send_message( - message, self._session_id, urgent=urgent) + message, + self._session_id, + urgent=urgent, + ) def exchange(self, urgent=False): - self.registry.broker.call_if_accepted("memory-info", - self.send_message, urgent) + self.registry.broker.call_if_accepted( + "memory-info", + self.send_message, + urgent, + ) def run(self): self._monitor.ping() new_timestamp = int(self._create_time()) memstats = MemoryStats(self._source_filename) memory_step_data = self._accumulate( - new_timestamp, memstats.free_memory, "accumulate-memory") + new_timestamp, + memstats.free_memory, + "accumulate-memory", + ) swap_step_data = self._accumulate( - new_timestamp, memstats.free_swap, "accumulate-swap") + new_timestamp, + memstats.free_swap, + "accumulate-swap", + ) if memory_step_data and swap_step_data: timestamp = memory_step_data[0] diff -Nru landscape-client-23.02/landscape/client/monitor/monitor.py landscape-client-23.08/landscape/client/monitor/monitor.py --- landscape-client-23.02/landscape/client/monitor/monitor.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/monitor.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,4 @@ """The Landscape monitor plugin system.""" - import os from landscape.client.broker.client import BrokerClient @@ -10,9 +9,15 @@ name = "monitor" - def __init__(self, reactor, config, persist, persist_filename=None, - step_size=5 * 60): - super(Monitor, self).__init__(reactor, config) + def __init__( + self, + reactor, + config, + persist, + persist_filename=None, + step_size=5 * 60, + ): + super().__init__(reactor, config) self.reactor = reactor self.config = config self.persist = persist @@ -30,5 +35,5 @@ def exchange(self): """Call C{exchange} on all plugins.""" - super(Monitor, self).exchange() + super().exchange() self.flush() diff -Nru landscape-client-23.02/landscape/client/monitor/mountinfo.py landscape-client-23.08/landscape/client/monitor/mountinfo.py --- landscape-client-23.02/landscape/client/monitor/mountinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/mountinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,11 +1,12 @@ import codecs -import time import os +import time from 
landscape.client.accumulate import Accumulator -from landscape.lib.disk import get_mount_info, is_device_removable -from landscape.lib.monitor import CoverageMonitor from landscape.client.monitor.plugin import MonitorPlugin +from landscape.lib.disk import get_mount_info +from landscape.lib.disk import is_device_removable +from landscape.lib.monitor import CoverageMonitor class MountInfo(MonitorPlugin): @@ -15,9 +16,15 @@ max_free_space_items_to_exchange = 200 - def __init__(self, interval=300, monitor_interval=60 * 60, - mounts_file="/proc/mounts", create_time=time.time, - statvfs=None, mtab_file="/etc/mtab"): + def __init__( + self, + interval=300, + monitor_interval=60 * 60, + mounts_file="/proc/mounts", + create_time=time.time, + statvfs=None, + mtab_file="/etc/mtab", + ): self.run_interval = interval self._monitor_interval = monitor_interval self._create_time = create_time @@ -33,21 +40,30 @@ self.is_device_removable = is_device_removable def register(self, registry): - super(MountInfo, self).register(registry) + super().register(registry) self._accumulate = Accumulator(self._persist, self.registry.step_size) - self._monitor = CoverageMonitor(self.run_interval, 0.8, - "mount info snapshot", - create_time=self._create_time) - self.registry.reactor.call_every(self._monitor_interval, - self._monitor.log) + self._monitor = CoverageMonitor( + self.run_interval, + 0.8, + "mount info snapshot", + create_time=self._create_time, + ) + self.registry.reactor.call_every( + self._monitor_interval, + self._monitor.log, + ) self.registry.reactor.call_on("stop", self._monitor.log, priority=2000) self.call_on_accepted("mount-info", self.send_messages, True) def create_messages(self): - return [message - for message in [self.create_mount_info_message(), - self.create_free_space_message()] - if message is not None] + return [ + message + for message in [ + self.create_mount_info_message(), + self.create_free_space_message(), + ] + if message is not None + ] def create_mount_info_message(self): if self._mount_info: @@ -60,24 +76,27 @@ def create_free_space_message(self): if self._free_space: items_to_exchange = self._free_space[ - :self.max_free_space_items_to_exchange] - message = {"type": "free-space", - "free-space": items_to_exchange} + : self.max_free_space_items_to_exchange + ] + message = {"type": "free-space", "free-space": items_to_exchange} self._free_space = self._free_space[ - self.max_free_space_items_to_exchange:] + self.max_free_space_items_to_exchange : + ] return message return None def send_messages(self, urgent=False): for message in self.create_messages(): d = self.registry.broker.send_message( - message, self._session_id, urgent=urgent) + message, + self._session_id, + urgent=urgent, + ) if message["type"] == "mount-info": d.addCallback(lambda x: self.persist_mount_info()) def exchange(self): - self.registry.broker.call_if_accepted("mount-info", - self.send_messages) + self.registry.broker.call_if_accepted("mount-info", self.send_messages) def persist_mount_info(self): for timestamp, mount_info in self._mount_info_to_persist: @@ -118,11 +137,12 @@ for info in get_mount_info(self._mounts_file, self._statvfs): device = info["device"] mount_point = info["mount-point"] - if (device.startswith("/dev/") and - not mount_point.startswith("/dev/") and - not self.is_device_removable(device) and - mount_point not in bound_mount_points - ): + if ( + device.startswith("/dev/") + and not mount_point.startswith("/dev/") + and not self.is_device_removable(device) + and mount_point not in 
bound_mount_points + ): yield info diff -Nru landscape-client-23.02/landscape/client/monitor/networkactivity.py landscape-client-23.08/landscape/client/monitor/networkactivity.py --- landscape-client-23.02/landscape/client/monitor/networkactivity.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/networkactivity.py 2023-08-17 21:19:32.000000000 +0000 @@ -2,13 +2,12 @@ A monitor that collects data on network activity, and sends messages with the inbound/outbound traffic per interface per step interval. """ - import time -from landscape.lib.network import get_network_traffic, is_64 from landscape.client.accumulate import Accumulator - from landscape.client.monitor.plugin import MonitorPlugin +from landscape.lib.network import get_network_traffic +from landscape.lib.network import is_64 class NetworkActivity(MonitorPlugin): @@ -24,8 +23,11 @@ max_network_items_to_exchange = 200 - def __init__(self, network_activity_file="/proc/net/dev", - create_time=time.time): + def __init__( + self, + network_activity_file="/proc/net/dev", + create_time=time.time, + ): self._source_file = network_activity_file # accumulated values for sending out via message self._network_activity = {} @@ -37,7 +39,7 @@ self._rollover_maxint = pow(2, 32) def register(self, registry): - super(NetworkActivity, self).register(registry) + super().register(registry) self._accumulate = Accumulator(self._persist, self.registry.step_size) self.call_on_accepted("network-activity", self.exchange, True) @@ -66,11 +68,17 @@ if not message: return self.registry.broker.send_message( - message, self._session_id, urgent=urgent) + message, + self._session_id, + urgent=urgent, + ) def exchange(self, urgent=False): - self.registry.broker.call_if_accepted("network-activity", - self.send_message, urgent) + self.registry.broker.call_if_accepted( + "network-activity", + self.send_message, + urgent, + ) def _traffic_delta(self, new_traffic): """ @@ -97,7 +105,9 @@ yield interface, delta_out, delta_in self._last_activity[interface] = ( - traffic["send_bytes"], traffic["recv_bytes"]) + traffic["send_bytes"], + traffic["recv_bytes"], + ) # We need cast the keys to a list as the size of the dictionary changes # on delete and the .keys() generator throws an error. @@ -114,10 +124,16 @@ new_traffic = get_network_traffic(self._source_file) for interface, delta_out, delta_in in self._traffic_delta(new_traffic): out_step_data = self._accumulate( - new_timestamp, delta_out, "delta-out-%s" % interface) + new_timestamp, + delta_out, + f"delta-out-{interface}", + ) in_step_data = self._accumulate( - new_timestamp, delta_in, "delta-in-%s" % interface) + new_timestamp, + delta_in, + f"delta-in-{interface}", + ) # there's only data when we cross a step boundary if not (in_step_data and out_step_data): @@ -125,4 +141,5 @@ steps = self._network_activity.setdefault(interface, []) steps.append( - (in_step_data[0], int(in_step_data[1]), int(out_step_data[1]))) + (in_step_data[0], int(in_step_data[1]), int(out_step_data[1])), + ) diff -Nru landscape-client-23.02/landscape/client/monitor/networkdevice.py landscape-client-23.08/landscape/client/monitor/networkdevice.py --- landscape-client-23.02/landscape/client/monitor/networkdevice.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/networkdevice.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,10 +1,9 @@ """ A monitor plugin that collects data on a machine's network devices. 
""" - from landscape.client.monitor.plugin import DataWatcher -from landscape.lib.network import get_active_device_info from landscape.lib.encoding import encode_if_needed +from landscape.lib.network import get_active_device_info class NetworkDevice(DataWatcher): @@ -15,11 +14,11 @@ scope = "network" def __init__(self, device_info=get_active_device_info): - super(NetworkDevice, self).__init__() + super().__init__() self._device_info = device_info def register(self, registry): - super(NetworkDevice, self).register(registry) + super().register(registry) self.call_on_accepted(self.message_type, self.exchange, True) def get_message(self): @@ -31,7 +30,8 @@ device_speeds = [] for device in device_data: speed_entry = { - "interface": encode_if_needed(device["interface"])} + "interface": encode_if_needed(device["interface"]), + } speed_entry["speed"] = device.pop("speed") speed_entry["duplex"] = device.pop("duplex") device_speeds.append(speed_entry) @@ -39,9 +39,12 @@ device["ip_address"] = encode_if_needed(device["ip_address"]) device["mac_address"] = encode_if_needed(device["mac_address"]) device["broadcast_address"] = encode_if_needed( - device["broadcast_address"]) + device["broadcast_address"], + ) device["netmask"] = encode_if_needed(device["netmask"]) - return {"type": self.message_type, - "devices": device_data, - "device-speeds": device_speeds} + return { + "type": self.message_type, + "devices": device_data, + "device-speeds": device_speeds, + } diff -Nru landscape-client-23.02/landscape/client/monitor/packagemonitor.py landscape-client-23.08/landscape/client/monitor/packagemonitor.py --- landscape-client-23.02/landscape/client/monitor/packagemonitor.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/packagemonitor.py 2023-08-17 21:19:32.000000000 +0000 @@ -3,10 +3,10 @@ from twisted.internet.utils import getProcessOutput +from landscape.client.monitor.plugin import MonitorPlugin +from landscape.client.package.reporter import find_reporter_command from landscape.lib.apt.package.store import PackageStore from landscape.lib.encoding import encode_values -from landscape.client.package.reporter import find_reporter_command -from landscape.client.monitor.plugin import MonitorPlugin class PackageMonitor(MonitorPlugin): @@ -17,7 +17,7 @@ _reporter_command = None def __init__(self, package_store_filename=None): - super(PackageMonitor, self).__init__() + super().__init__() if package_store_filename: self._package_store = PackageStore(package_store_filename) else: @@ -29,17 +29,23 @@ if self.config.clones and self.config.is_clone: # Run clones a bit more frequently in order to catch up self.run_interval = 60 # 300 - super(PackageMonitor, self).register(registry) + super().register(registry) if not self._package_store: - filename = os.path.join(registry.config.data_path, - "package/database") + filename = os.path.join( + registry.config.data_path, + "package/database", + ) self._package_store = PackageStore(filename) - registry.register_message("package-ids", - self._enqueue_message_as_reporter_task) - registry.reactor.call_on("server-uuid-changed", - self._server_uuid_changed) + registry.register_message( + "package-ids", + self._enqueue_message_as_reporter_task, + ) + registry.reactor.call_on( + "server-uuid-changed", + self._server_uuid_changed, + ) self.call_on_accepted("packages", self.spawn_reporter) self.run() @@ -59,7 +65,7 @@ def _run_fake_reporter(self, args): """Run a fake-reporter in-process.""" - class FakeFacade(object): + class FakeFacade: """ 
A fake facade to workaround the issue that the AptFacade essentially allows only once instance per process. @@ -67,34 +73,48 @@ def get_arch(self): arch = os.uname()[-1] - result = {"pentium": "i386", - "i86pc": "i386", - "x86_64": "amd64"}.get(arch) + result = { + "pentium": "i386", + "i86pc": "i386", + "x86_64": "amd64", + }.get(arch) if result: arch = result - elif (arch[0] == "i" and arch.endswith("86")): + elif arch[0] == "i" and arch.endswith("86"): arch = "i386" return arch if getattr(self, "_fake_reporter", None) is None: from landscape.client.package.reporter import ( - FakeReporter, PackageReporterConfiguration) + FakeReporter, + PackageReporterConfiguration, + ) from landscape.lib.apt.package.store import FakePackageStore + package_facade = FakeFacade() package_config = PackageReporterConfiguration() - package_config.load(args + ["-d", self.config.data_path, - "-l", self.config.log_dir]) + package_config.load( + args + + ["-d", self.config.data_path, "-l", self.config.log_dir], + ) package_store = FakePackageStore(package_config.store_filename) - self._fake_reporter = FakeReporter(package_store, package_facade, - self.registry.broker, - package_config) + self._fake_reporter = FakeReporter( + package_store, + package_facade, + self.registry.broker, + package_config, + ) self._fake_reporter.global_store_filename = os.path.join( - self.config.master_data_path, "package", "database") + self.config.master_data_path, + "package", + "database", + ) self._fake_reporter_running = False if self._fake_reporter_running: from twisted.internet.defer import succeed + return succeed(None) self._fake_reporter_running = True @@ -123,16 +143,19 @@ # path is set to None so that getProcessOutput does not # chdir to "." see bug #211373 env = encode_values(env) - result = getProcessOutput(self._reporter_command, - args=args, env=env, - errortoo=1, - path=None) + result = getProcessOutput( + self._reporter_command, + args=args, + env=env, + errortoo=1, + path=None, + ) result.addCallback(self._got_reporter_output) return result def _got_reporter_output(self, output): if output: - logging.warning("Package reporter output:\n%s" % output) + logging.warning(f"Package reporter output:\n{output}") def _reset(self): """ @@ -146,8 +169,10 @@ resynchronize task and not causing sqlite to reset the serial key. 
""" - task = self._package_store.add_task("reporter", - {"type": "resynchronize"}) + task = self._package_store.add_task( + "reporter", + {"type": "resynchronize"}, + ) self._package_store.clear_tasks(except_tasks=(task,)) def _server_uuid_changed(self, old_uuid, new_uuid): diff -Nru landscape-client-23.02/landscape/client/monitor/plugin.py landscape-client-23.08/landscape/client/monitor/plugin.py --- landscape-client-23.02/landscape/client/monitor/plugin.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/plugin.py 2023-08-17 21:19:32.000000000 +0000 @@ -2,9 +2,9 @@ from twisted.internet.defer import succeed +from landscape.client.broker.client import BrokerClientPlugin from landscape.lib.format import format_object from landscape.lib.log import log_failure -from landscape.client.broker.client import BrokerClientPlugin class MonitorPlugin(BrokerClientPlugin): @@ -17,7 +17,7 @@ scope = None def register(self, monitor): - super(MonitorPlugin, self).register(monitor) + super().register(monitor) if self.persist_name is not None: self._persist = self.monitor.persist.root_at(self.persist_name) else: @@ -65,10 +65,15 @@ def send_message(self, urgent): message = self.get_message() if message is not None: - info("Queueing a message with updated data watcher info " - "for %s.", format_object(self)) + info( + "Queueing a message with updated data watcher info " + f"for {format_object(self)}.", + ) result = self.registry.broker.send_message( - message, self._session_id, urgent=urgent) + message, + self._session_id, + urgent=urgent, + ) def persist_data(message_id): self.persist_data() @@ -90,5 +95,8 @@ Conditionally add a message to the message store if new data is available. """ - return self.registry.broker.call_if_accepted(self.message_type, - self.send_message, urgent) + return self.registry.broker.call_if_accepted( + self.message_type, + self.send_message, + urgent, + ) diff -Nru landscape-client-23.02/landscape/client/monitor/processorinfo.py landscape-client-23.08/landscape/client/monitor/processorinfo.py --- landscape-client-23.02/landscape/client/monitor/processorinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/processorinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -2,8 +2,8 @@ import os import re -from landscape.lib.plugin import PluginConfigError from landscape.client.monitor.plugin import MonitorPlugin +from landscape.lib.plugin import PluginConfigError class ProcessorInfo(MonitorPlugin): @@ -37,16 +37,22 @@ # Prevent the Plugin base-class from scheduling looping calls. run_interval = None - def __init__(self, delay=2, machine_name=None, - source_filename="/proc/cpuinfo"): + def __init__( + self, + delay=2, + machine_name=None, + source_filename="/proc/cpuinfo", + ): self._delay = delay self._source_filename = source_filename if machine_name is None: machine_name = os.uname()[4] - self._cpu_info_reader = self._create_cpu_info_reader(machine_name, - source_filename) + self._cpu_info_reader = self._create_cpu_info_reader( + machine_name, + source_filename, + ) def _create_cpu_info_reader(self, machine_name, source_filename): """Return a message factory suitable for the specified machine name.""" @@ -56,19 +62,22 @@ if regexp.match(machine_name): return pair[1](source_filename) - raise PluginConfigError("A processor info reader for '%s' is not " - "available." 
% machine_name) + raise PluginConfigError( + f"A processor info reader for '{machine_name}' is not available.", + ) def register(self, registry): """Register this plugin with the specified plugin registry.""" - super(ProcessorInfo, self).register(registry) + super().register(registry) self.registry.reactor.call_later(self._delay, self.run) self.call_on_accepted("processor-info", self.send_message, True) def create_message(self): """Retrieve processor information and generate a message.""" - return {"type": "processor-info", - "processors": self._cpu_info_reader.create_message()} + return { + "type": "processor-info", + "processors": self._cpu_info_reader.create_message(), + } def send_message(self, urgent=False): dirty = False @@ -90,12 +99,18 @@ logging.info("Queueing updated processor info. Contents:") logging.info(message) self.registry.broker.send_message( - message, self._session_id, urgent=urgent) + message, + self._session_id, + urgent=urgent, + ) def run(self, urgent=False): """Create a message and put it on the message queue.""" - self.registry.broker.call_if_accepted("processor-info", - self.send_message, urgent) + self.registry.broker.call_if_accepted( + "processor-info", + self.send_message, + urgent, + ) def _has_changed(self, processor, message): """Returns true if processor details changed since the last read.""" @@ -116,8 +131,10 @@ processor["model"] = message["model"] processor["cache_size"] = message.get("cache-size", -1) processor["vendor"] = message.get("vendor", "") - self._persist.set(("processor", str(message["processor-id"])), - processor) + self._persist.set( + ("processor", str(message["processor-id"])), + processor, + ) class PowerPCMessageFactory: @@ -218,8 +235,10 @@ model = parts[1].strip() elif regexp.match(key): start, end = re.compile(r"\d+").search(key).span() - message = {"processor-id": int(key[start:end]), - "model": model} + message = { + "processor-id": int(key[start:end]), + "model": model, + } processors.append(message) finally: file.close() @@ -352,9 +371,11 @@ return processors -message_factories = [("(arm*|aarch64)", ARMMessageFactory), - ("ppc(64)?", PowerPCMessageFactory), - ("sparc[64]", SparcMessageFactory), - ("i[3-7]86|x86_64", X86MessageFactory), - ("s390x", S390XMessageFactory), - ("riscv64", RISCVMessageFactory)] +message_factories = [ + ("(arm*|aarch64)", ARMMessageFactory), + ("ppc(64)?", PowerPCMessageFactory), + ("sparc[64]", SparcMessageFactory), + ("i[3-7]86|x86_64", X86MessageFactory), + ("s390x", S390XMessageFactory), + ("riscv64", RISCVMessageFactory), +] diff -Nru landscape-client-23.02/landscape/client/monitor/rebootrequired.py landscape-client-23.08/landscape/client/monitor/rebootrequired.py --- landscape-client-23.02/landscape/client/monitor/rebootrequired.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/rebootrequired.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,8 @@ -import os import logging +import os -from landscape.lib.fs import read_text_file from landscape.client.monitor.plugin import MonitorPlugin +from landscape.lib.fs import read_text_file REBOOT_REQUIRED_FILENAME = "/var/run/reboot-required" @@ -35,7 +35,7 @@ return [] lines = read_text_file(self._packages_filename).splitlines() - packages = set(line.strip() for line in lines if line) + packages = {line.strip() for line in lines if line} return sorted(packages) def _create_message(self): @@ -59,12 +59,18 @@ message = self._create_message() if message: message["type"] = "reboot-required-info" - logging.info("Queueing 
message with updated " - "reboot-required status.") - self.registry.broker.send_message(message, self._session_id, - urgent=True) + logging.info( + "Queueing message with updated " "reboot-required status.", + ) + self.registry.broker.send_message( + message, + self._session_id, + urgent=True, + ) def run(self): """Send reboot-required messages if the server accepts them.""" return self.registry.broker.call_if_accepted( - "reboot-required-info", self.send_message) + "reboot-required-info", + self.send_message, + ) diff -Nru landscape-client-23.02/landscape/client/monitor/service.py landscape-client-23.08/landscape/client/monitor/service.py --- landscape-client-23.02/landscape/client/monitor/service.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/service.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,14 +1,15 @@ """Deployment code for the monitor.""" - +import logging import os from twisted.python.reflect import namedClass -from landscape.client.service import LandscapeService, run_landscape_service +from landscape.client.amp import ComponentPublisher +from landscape.client.broker.amp import RemoteBrokerConnector from landscape.client.monitor.config import MonitorConfiguration from landscape.client.monitor.monitor import Monitor -from landscape.client.broker.amp import RemoteBrokerConnector -from landscape.client.amp import ComponentPublisher +from landscape.client.service import LandscapeService +from landscape.client.service import run_landscape_service class MonitorService(LandscapeService): @@ -21,22 +22,45 @@ def __init__(self, config): self.persist_filename = os.path.join( - config.data_path, "%s.bpickle" % self.service_name) - super(MonitorService, self).__init__(config) + config.data_path, + f"{self.service_name}.bpickle", + ) + super().__init__(config) self.plugins = self.get_plugins() - self.monitor = Monitor(self.reactor, self.config, self.persist, - persist_filename=self.persist_filename) - self.publisher = ComponentPublisher(self.monitor, self.reactor, - self.config) + self.monitor = Monitor( + self.reactor, + self.config, + self.persist, + persist_filename=self.persist_filename, + ) + self.publisher = ComponentPublisher( + self.monitor, + self.reactor, + self.config, + ) def get_plugins(self): - return [namedClass("landscape.client.monitor.%s.%s" - % (plugin_name.lower(), plugin_name))() - for plugin_name in self.config.plugin_factories] + plugins = [] + + for plugin_name in self.config.plugin_factories: + try: + plugin = namedClass( + "landscape.client.monitor." + f"{plugin_name.lower()}.{plugin_name}" + ) + plugins.append(plugin()) + except ModuleNotFoundError: + logging.warning( + "Invalid monitor plugin specified: '{}'. " + "See `example.conf` for a full list of monitor plugins.", + plugin_name, + ) + + return plugins - def startService(self): + def startService(self): # noqa: N802 """Start the monitor.""" - super(MonitorService, self).startService() + super().startService() self.publisher.start() def start_plugins(broker): @@ -50,7 +74,7 @@ connected = self.connector.connect() return connected.addCallback(start_plugins) - def stopService(self): + def stopService(self): # noqa: N802 """Stop the monitor. 
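One behavioural note on the new `get_plugins` above: stdlib `logging.warning` interpolates positional arguments with %-style formatting, so the `{}` placeholder in the added warning is never filled in. A hedged sketch of the same loop with lazy %-interpolation (the module-path construction mirrors the hunk above; `load_monitor_plugins` is a name chosen here for illustration):

    import logging

    from twisted.python.reflect import namedClass


    def load_monitor_plugins(plugin_factories):
        plugins = []
        for plugin_name in plugin_factories:
            try:
                cls = namedClass(
                    "landscape.client.monitor."
                    f"{plugin_name.lower()}.{plugin_name}",
                )
                plugins.append(cls())
            except ModuleNotFoundError:
                # %r is interpolated lazily by the logging module; a
                # "{}" placeholder would be logged verbatim instead.
                logging.warning(
                    "Invalid monitor plugin specified: %r. See "
                    "`example.conf` for a full list of monitor plugins.",
                    plugin_name,
                )
        return plugins
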
The monitor is flushed to ensure that things like persist databases @@ -59,7 +83,7 @@ deferred = self.publisher.stop() self.monitor.flush() self.connector.disconnect() - super(MonitorService, self).stopService() + super().stopService() return deferred diff -Nru landscape-client-23.02/landscape/client/monitor/snapmonitor.py landscape-client-23.08/landscape/client/monitor/snapmonitor.py --- landscape-client-23.02/landscape/client/monitor/snapmonitor.py 1970-01-01 00:00:00.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/snapmonitor.py 2023-08-17 21:19:32.000000000 +0000 @@ -0,0 +1,44 @@ +import logging + +from landscape.client.monitor.plugin import DataWatcher +from landscape.client.snap.http import SnapdHttpException +from landscape.client.snap.http import SnapHttp +from landscape.message_schemas.server_bound import SNAPS + + +class SnapMonitor(DataWatcher): + + message_type = "snaps" + message_key = message_type + persist_name = message_type + scope = "snaps" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self._snap_http = SnapHttp() + + def register(self, registry): + self.config = registry.config + # The default interval is 30 minutes. + self.run_interval = self.config.snap_monitor_interval + + super(SnapMonitor, self).register(registry) + + def get_data(self): + try: + snaps = self._snap_http.get_snaps() + except SnapdHttpException as e: + logging.error(f"Unable to list installed snaps: {e}") + return + + # We get a lot of extra info from snapd. To avoid caching it all + # or invalidating the cache on timestamp changes, we use Message + # coercion to strip out the unnecessaries, then sort on the snap + # IDs to order the list. + data = SNAPS.coerce( + {"type": "snaps", "snaps": {"installed": snaps["result"]}}, + ) + data["snaps"]["installed"].sort(key=lambda x: x["id"]) + + return data["snaps"] diff -Nru landscape-client-23.02/landscape/client/monitor/swiftusage.py landscape-client-23.08/landscape/client/monitor/swiftusage.py --- landscape-client-23.02/landscape/client/monitor/swiftusage.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/swiftusage.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,19 +1,20 @@ import logging -import time import os +import time from twisted.internet import threads from landscape.client.accumulate import Accumulator +from landscape.client.monitor.plugin import MonitorPlugin from landscape.lib.monitor import CoverageMonitor from landscape.lib.network import get_active_device_info -from landscape.client.monitor.plugin import MonitorPlugin try: from swift.common.ring import Ring from swift.cli.recon import Scout + has_swift = True -except ImportError: +except (ImportError, AttributeError): has_swift = False @@ -28,9 +29,13 @@ persist_name = "swift-usage" scope = "storage" - def __init__(self, interval=30, monitor_interval=60 * 60, - create_time=time.time, - swift_ring="/etc/swift/object.ring.gz"): + def __init__( + self, + interval=30, + monitor_interval=60 * 60, + create_time=time.time, + swift_ring="/etc/swift/object.ring.gz", + ): self._interval = interval self._monitor_interval = monitor_interval self._create_time = create_time @@ -41,13 +46,18 @@ self.active = True def register(self, registry): - super(SwiftUsage, self).register(registry) + super().register(registry) self._accumulate = Accumulator(self._persist, self._interval) self._monitor = CoverageMonitor( - self.run_interval, 0.8, "Swift device usage snapshot", - create_time=self._create_time) + 
self.run_interval, + 0.8, + "Swift device usage snapshot", + create_time=self._create_time, + ) self.registry.reactor.call_every( - self._monitor_interval, self._monitor.log) + self._monitor_interval, + self._monitor.log, + ) self.registry.reactor.call_on("stop", self._monitor.log, priority=2000) self.call_on_accepted("swift-usage", self.send_message, True) @@ -62,11 +72,17 @@ message = self.create_message() if message: self.registry.broker.send_message( - message, self._session_id, urgent=urgent) + message, + self._session_id, + urgent=urgent, + ) def exchange(self, urgent=False): self.registry.broker.call_if_accepted( - "swift-usage", self.send_message, urgent) + "swift-usage", + self.send_message, + urgent, + ) def run(self): if not self._should_run(): @@ -87,7 +103,8 @@ if not self._has_swift: logging.info( "This machine does not appear to be a Swift machine. " - "Deactivating plugin.") + "Deactivating plugin.", + ) self.active = False return False @@ -108,8 +125,7 @@ def _get_local_ips(self): """Return a list of IP addresses for local devices.""" - return [ - device["ip_address"] for device in get_active_device_info()] + return [device["ip_address"] for device in get_active_device_info()] def _perform_recon_call(self, host): """Get usage information from Swift Recon service.""" @@ -142,9 +158,12 @@ for key in ("size", "avail", "used"): # Store values in tree so it's easy to delete all values for a # device - persist_key = "usage.%s.%s" % (device, key) + persist_key = f"usage.{device}.{key}" step_value = self._accumulate( - timestamp, usage[key], persist_key) + timestamp, + usage[key], + persist_key, + ) step_values.append(step_value) if all(step_values): @@ -155,5 +174,5 @@ # Update device list and remove usage for devices that no longer exist. current_devices = set(self._persist.get("devices", ())) for device in current_devices - devices: - self._persist.remove("usage.%s" % device) + self._persist.remove(f"usage.{device}") self._persist.set("devices", list(devices)) diff -Nru landscape-client-23.02/landscape/client/monitor/temperature.py landscape-client-23.08/landscape/client/monitor/temperature.py --- landscape-client-23.02/landscape/client/monitor/temperature.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/temperature.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,10 +1,9 @@ import time -from landscape.lib.monitor import CoverageMonitor -from landscape.lib.sysstats import get_thermal_zones - from landscape.client.accumulate import Accumulator from landscape.client.monitor.plugin import MonitorPlugin +from landscape.lib.monitor import CoverageMonitor +from landscape.lib.sysstats import get_thermal_zones class Temperature(MonitorPlugin): @@ -15,8 +14,13 @@ # Prevent the Plugin base-class from scheduling looping calls. 
run_interval = None - def __init__(self, interval=30, monitor_interval=60 * 60, - thermal_zone_path=None, create_time=time.time): + def __init__( + self, + interval=30, + monitor_interval=60 * 60, + thermal_zone_path=None, + create_time=time.time, + ): self.thermal_zone_path = thermal_zone_path self._interval = interval self._monitor_interval = monitor_interval @@ -29,18 +33,25 @@ self._temperatures[thermal_zone.name] = [] def register(self, registry): - super(Temperature, self).register(registry) + super().register(registry) if self._thermal_zones: - self._accumulate = Accumulator(self._persist, - self.registry.step_size) + self._accumulate = Accumulator( + self._persist, + self.registry.step_size, + ) registry.reactor.call_every(self._interval, self.run) - self._monitor = CoverageMonitor(self._interval, 0.8, - "temperature snapshot", - create_time=self._create_time) - registry.reactor.call_every(self._monitor_interval, - self._monitor.log) + self._monitor = CoverageMonitor( + self._interval, + 0.8, + "temperature snapshot", + create_time=self._create_time, + ) + registry.reactor.call_every( + self._monitor_interval, + self._monitor.log, + ) registry.reactor.call_on("stop", self._monitor.log, priority=2000) self.call_on_accepted("temperature", self.exchange, True) @@ -51,18 +62,29 @@ self._temperatures[zone] = [] if not temperatures: continue - messages.append({"type": "temperature", "thermal-zone": zone, - "temperatures": temperatures}) + messages.append( + { + "type": "temperature", + "thermal-zone": zone, + "temperatures": temperatures, + }, + ) return messages def send_messages(self, urgent): for message in self.create_messages(): self.registry.broker.send_message( - message, self._session_id, urgent=urgent) + message, + self._session_id, + urgent=urgent, + ) def exchange(self, urgent=False): - self.registry.broker.call_if_accepted("temperature", - self.send_messages, urgent) + self.registry.broker.call_if_accepted( + "temperature", + self.send_messages, + urgent, + ) def run(self): self._monitor.ping() diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_activeprocessinfo.py landscape-client-23.08/landscape/client/monitor/tests/test_activeprocessinfo.py --- landscape-client-23.02/landscape/client/monitor/tests/test_activeprocessinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_activeprocessinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,15 +1,18 @@ import operator import os import shutil -import tempfile import subprocess +import tempfile +from unittest.mock import ANY +from unittest.mock import Mock +from unittest.mock import patch from twisted.internet.defer import fail -from landscape.lib.testing import ProcessDataBuilder from landscape.client.monitor.activeprocessinfo import ActiveProcessInfo -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper -from mock import ANY, Mock, patch +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper +from landscape.lib.testing import ProcessDataBuilder class ActiveProcessInfoTest(LandscapeTest): @@ -42,17 +45,27 @@ def test_only_first_run_includes_kill_message(self): """Test ensures that only the first run queues a kill message.""" - self.builder.create_data(672, self.builder.TRACING_STOP, - uid=1000, gid=1000, started_after_boot=10, - process_name="blarpy") + self.builder.create_data( + 672, + self.builder.TRACING_STOP, + uid=1000, + gid=1000, + started_after_boot=10, + process_name="blarpy", 
+ ) plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=10) self.monitor.add(plugin) self.monitor.exchange() - self.builder.create_data(671, self.builder.STOPPED, uid=1000, - gid=1000, started_after_boot=15, - process_name="blargh") + self.builder.create_data( + 671, + self.builder.STOPPED, + uid=1000, + gid=1000, + started_after_boot=15, + process_name="blargh", + ) self.monitor.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) @@ -98,48 +111,101 @@ def test_read_sample_data(self): """Test reading a sample set of process data.""" - self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, - started_after_boot=1030, process_name="init") - self.builder.create_data(671, self.builder.STOPPED, uid=1000, - gid=1000, started_after_boot=1110, - process_name="blargh") - self.builder.create_data(672, self.builder.TRACING_STOP, - uid=1000, gid=1000, started_after_boot=1120, - process_name="blarpy") - - plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, - jiffies=10, boot_time=0) + self.builder.create_data( + 1, + self.builder.RUNNING, + uid=0, + gid=0, + started_after_boot=1030, + process_name="init", + ) + self.builder.create_data( + 671, + self.builder.STOPPED, + uid=1000, + gid=1000, + started_after_boot=1110, + process_name="blargh", + ) + self.builder.create_data( + 672, + self.builder.TRACING_STOP, + uid=1000, + gid=1000, + started_after_boot=1120, + process_name="blarpy", + ) + + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=100, + jiffies=10, + boot_time=0, + ) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "active-process-info") self.assertTrue("kill-all-processes" in message) self.assertTrue("add-processes" in message) - expected_process_0 = {"state": b"R", "gid": 0, "pid": 1, - "vm-size": 11676, "name": "init", "uid": 0, - "start-time": 103, "percent-cpu": 0.0} - expected_process_1 = {"state": b"T", "gid": 1000, "pid": 671, - "vm-size": 11676, "name": "blargh", "uid": 1000, - "start-time": 111, "percent-cpu": 0.0} - expected_process_2 = {"state": b"t", "gid": 1000, "pid": 672, - "vm-size": 11676, "name": "blarpy", "uid": 1000, - "start-time": 112, "percent-cpu": 0.0} + expected_process_0 = { + "state": b"R", + "gid": 0, + "pid": 1, + "vm-size": 11676, + "name": "init", + "uid": 0, + "start-time": 103, + "percent-cpu": 0.0, + } + expected_process_1 = { + "state": b"T", + "gid": 1000, + "pid": 671, + "vm-size": 11676, + "name": "blargh", + "uid": 1000, + "start-time": 111, + "percent-cpu": 0.0, + } + expected_process_2 = { + "state": b"t", + "gid": 1000, + "pid": 672, + "vm-size": 11676, + "name": "blarpy", + "uid": 1000, + "start-time": 112, + "percent-cpu": 0.0, + } processes = message["add-processes"] processes.sort(key=operator.itemgetter("pid")) - self.assertEqual(processes, [expected_process_0, expected_process_1, - expected_process_2]) + self.assertEqual( + processes, + [expected_process_0, expected_process_1, expected_process_2], + ) def test_skip_non_numeric_subdirs(self): """Test ensures the plugin doesn't touch non-process dirs in /proc.""" - self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, - started_after_boot=1120, process_name="init") + self.builder.create_data( + 1, + self.builder.RUNNING, + uid=0, + gid=0, + started_after_boot=1120, + process_name="init", + ) directory = os.path.join(self.sample_dir, "acpi") os.mkdir(directory) self.assertTrue(os.path.isdir(directory)) - plugin = 
ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, - jiffies=10, boot_time=0) + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=100, + jiffies=10, + boot_time=0, + ) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] @@ -147,51 +213,107 @@ self.assertTrue("kill-all-processes" in message) self.assertTrue("add-processes" in message) - expected_process = {"pid": 1, "state": b"R", "name": "init", - "vm-size": 11676, "uid": 0, "gid": 0, - "start-time": 112, "percent-cpu": 0.0} + expected_process = { + "pid": 1, + "state": b"R", + "name": "init", + "vm-size": 11676, + "uid": 0, + "gid": 0, + "start-time": 112, + "percent-cpu": 0.0, + } self.assertEqual(message["add-processes"], [expected_process]) def test_plugin_manager(self): """Test plugin manager integration.""" - self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, - started_after_boot=1100, process_name="init") - - plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, - jiffies=10, boot_time=0) + self.builder.create_data( + 1, + self.builder.RUNNING, + uid=0, + gid=0, + started_after_boot=1100, + process_name="init", + ) + + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=100, + jiffies=10, + boot_time=0, + ) self.monitor.add(plugin) self.monitor.exchange() self.assertMessages( self.mstore.get_pending_messages(), - [{"type": "active-process-info", - "kill-all-processes": True, - "add-processes": [{"pid": 1, "state": b"R", "name": "init", - "vm-size": 11676, "uid": 0, "gid": 0, - "start-time": 110, "percent-cpu": 0.0}]}]) + [ + { + "type": "active-process-info", + "kill-all-processes": True, + "add-processes": [ + { + "pid": 1, + "state": b"R", + "name": "init", + "vm-size": 11676, + "uid": 0, + "gid": 0, + "start-time": 110, + "percent-cpu": 0.0, + }, + ], + }, + ], + ) def test_process_terminated(self): """Test that the plugin handles process changes in a diff-like way.""" # This test is *too big* - self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, - started_after_boot=1010, process_name="init") - self.builder.create_data(671, self.builder.STOPPED, uid=1000, - gid=1000, started_after_boot=1020, - process_name="blargh") - self.builder.create_data(672, self.builder.TRACING_STOP, - uid=1000, gid=1000, started_after_boot=1040, - process_name="blarpy") - - plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, - jiffies=10, boot_time=0) + self.builder.create_data( + 1, + self.builder.RUNNING, + uid=0, + gid=0, + started_after_boot=1010, + process_name="init", + ) + self.builder.create_data( + 671, + self.builder.STOPPED, + uid=1000, + gid=1000, + started_after_boot=1020, + process_name="blargh", + ) + self.builder.create_data( + 672, + self.builder.TRACING_STOP, + uid=1000, + gid=1000, + started_after_boot=1040, + process_name="blarpy", + ) + + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=100, + jiffies=10, + boot_time=0, + ) self.monitor.add(plugin) plugin.exchange() # Terminate a process and start another. 
self.builder.remove_data(671) - self.builder.create_data(12753, self.builder.RUNNING, - uid=0, gid=0, started_after_boot=1070, - process_name="wubble") + self.builder.create_data( + 12753, + self.builder.RUNNING, + uid=0, + gid=0, + started_after_boot=1070, + process_name="wubble", + ) plugin.exchange() messages = self.mstore.get_pending_messages() @@ -204,22 +326,42 @@ self.assertTrue("kill-all-processes" in message) self.assertEqual(message["kill-all-processes"], True) self.assertTrue("add-processes" in message) - expected_process_0 = {"state": b"R", "gid": 0, "pid": 1, - "vm-size": 11676, "name": "init", - "uid": 0, "start-time": 101, - "percent-cpu": 0.0} - expected_process_1 = {"state": b"T", "gid": 1000, "pid": 671, - "vm-size": 11676, "name": "blargh", - "uid": 1000, "start-time": 102, - "percent-cpu": 0.0} - expected_process_2 = {"state": b"t", "gid": 1000, "pid": 672, - "vm-size": 11676, "name": "blarpy", - "uid": 1000, "start-time": 104, - "percent-cpu": 0.0} + expected_process_0 = { + "state": b"R", + "gid": 0, + "pid": 1, + "vm-size": 11676, + "name": "init", + "uid": 0, + "start-time": 101, + "percent-cpu": 0.0, + } + expected_process_1 = { + "state": b"T", + "gid": 1000, + "pid": 671, + "vm-size": 11676, + "name": "blargh", + "uid": 1000, + "start-time": 102, + "percent-cpu": 0.0, + } + expected_process_2 = { + "state": b"t", + "gid": 1000, + "pid": 672, + "vm-size": 11676, + "name": "blarpy", + "uid": 1000, + "start-time": 104, + "percent-cpu": 0.0, + } processes = message["add-processes"] processes.sort(key=operator.itemgetter("pid")) - self.assertEqual(processes, [expected_process_0, expected_process_1, - expected_process_2]) + self.assertEqual( + processes, + [expected_process_0, expected_process_1, expected_process_2], + ) # Report diff-like changes to processes, such as terminated # processes and new processes. 
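The expected `start-time` values in these tests follow directly from the fixture parameters: `/proc/<pid>/stat` reports a start time in jiffies since boot, and the plugin converts it to seconds. A worked sketch of that conversion, using the `jiffies` and `boot_time` arguments shown above:

    def expected_start_time(started_after_boot, jiffies, boot_time=0):
        # e.g. started_after_boot=1010, jiffies=10, boot_time=0 -> 101,
        # matching expected_process_0 in the hunk above.
        return boot_time + started_after_boot // jiffies
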
@@ -228,10 +370,16 @@ self.assertTrue("add-processes" in message) self.assertEqual(len(message["add-processes"]), 1) - expected_process = {"state": b"R", "gid": 0, "pid": 12753, - "vm-size": 11676, "name": "wubble", - "uid": 0, "start-time": 107, - "percent-cpu": 0.0} + expected_process = { + "state": b"R", + "gid": 0, + "pid": 12753, + "vm-size": 11676, + "name": "wubble", + "uid": 0, + "start-time": 107, + "percent-cpu": 0.0, + } self.assertEqual(message["add-processes"], [expected_process]) self.assertTrue("kill-processes" in message) @@ -240,9 +388,14 @@ def test_only_queue_message_when_process_data_is_available(self): """Test ensures that messages are only queued when data changes.""" - self.builder.create_data(672, self.builder.TRACING_STOP, - uid=1000, gid=1000, started_after_boot=10, - process_name="blarpy") + self.builder.create_data( + 672, + self.builder.TRACING_STOP, + uid=1000, + gid=1000, + started_after_boot=10, + process_name="blarpy", + ) plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=10) self.monitor.add(plugin) @@ -255,27 +408,62 @@ def test_only_report_active_processes(self): """Test ensures the plugin only reports active processes.""" - self.builder.create_data(672, self.builder.DEAD, - uid=1000, gid=1000, started_after_boot=10, - process_name="blarpy") - self.builder.create_data(673, self.builder.ZOMBIE, - uid=1000, gid=1000, started_after_boot=12, - process_name="blarpitty") - self.builder.create_data(674, self.builder.RUNNING, - uid=1000, gid=1000, started_after_boot=13, - process_name="blarpie") - self.builder.create_data(675, self.builder.STOPPED, - uid=1000, gid=1000, started_after_boot=14, - process_name="blarping") - self.builder.create_data(676, self.builder.TRACING_STOP, - uid=1000, gid=1000, started_after_boot=15, - process_name="floerp") - self.builder.create_data(677, self.builder.DISK_SLEEP, - uid=1000, gid=1000, started_after_boot=18, - process_name="floerpidity") - self.builder.create_data(678, self.builder.SLEEPING, - uid=1000, gid=1000, started_after_boot=21, - process_name="floerpiditting") + self.builder.create_data( + 672, + self.builder.DEAD, + uid=1000, + gid=1000, + started_after_boot=10, + process_name="blarpy", + ) + self.builder.create_data( + 673, + self.builder.ZOMBIE, + uid=1000, + gid=1000, + started_after_boot=12, + process_name="blarpitty", + ) + self.builder.create_data( + 674, + self.builder.RUNNING, + uid=1000, + gid=1000, + started_after_boot=13, + process_name="blarpie", + ) + self.builder.create_data( + 675, + self.builder.STOPPED, + uid=1000, + gid=1000, + started_after_boot=14, + process_name="blarping", + ) + self.builder.create_data( + 676, + self.builder.TRACING_STOP, + uid=1000, + gid=1000, + started_after_boot=15, + process_name="floerp", + ) + self.builder.create_data( + 677, + self.builder.DISK_SLEEP, + uid=1000, + gid=1000, + started_after_boot=18, + process_name="floerpidity", + ) + self.builder.create_data( + 678, + self.builder.SLEEPING, + uid=1000, + gid=1000, + started_after_boot=21, + process_name="floerpiditting", + ) plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=10) self.monitor.add(plugin) @@ -295,9 +483,14 @@ def test_report_interesting_state_changes(self): """Test ensures that interesting state changes are reported.""" - self.builder.create_data(672, self.builder.RUNNING, - uid=1000, gid=1000, started_after_boot=10, - process_name="blarpy") + self.builder.create_data( + 672, + self.builder.RUNNING, + uid=1000, + gid=1000, + started_after_boot=10, + process_name="blarpy", + ) # Report 
a running process. plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=10) @@ -317,9 +510,14 @@ # Convert the process to a zombie and ensure it gets reported. self.builder.remove_data(672) - self.builder.create_data(672, self.builder.ZOMBIE, - uid=1000, gid=1000, started_after_boot=10, - process_name="blarpy") + self.builder.create_data( + 672, + self.builder.ZOMBIE, + uid=1000, + gid=1000, + started_after_boot=10, + process_name="blarpy", + ) plugin.exchange() @@ -336,12 +534,18 @@ L{MonitorPlugin}-based plugins can provide a callable to call when a message type becomes accepted. """ - plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, - jiffies=10) + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=100, + jiffies=10, + ) self.monitor.add(plugin) self.assertEqual(len(self.mstore.get_pending_messages()), 0) result = self.monitor.fire_event( - "message-type-acceptance-changed", "active-process-info", True) + "message-type-acceptance-changed", + "active-process-info", + True, + ) def assert_messages(ignored): self.assertEqual(len(self.mstore.get_pending_messages()), 1) @@ -354,49 +558,80 @@ When a C{resynchronize} event occurs, with 'process' scope, we should clear the information held in memory by the activeprocess monitor. """ - self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, - started_after_boot=1030, process_name="init") - self.builder.create_data(671, self.builder.STOPPED, uid=1000, - gid=1000, started_after_boot=1110, - process_name="blargh") - self.builder.create_data(672, self.builder.TRACING_STOP, - uid=1000, gid=1000, started_after_boot=1120, - process_name="blarpy") - - plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, - jiffies=10, boot_time=0) + self.builder.create_data( + 1, + self.builder.RUNNING, + uid=0, + gid=0, + started_after_boot=1030, + process_name="init", + ) + self.builder.create_data( + 671, + self.builder.STOPPED, + uid=1000, + gid=1000, + started_after_boot=1110, + process_name="blargh", + ) + self.builder.create_data( + 672, + self.builder.TRACING_STOP, + uid=1000, + gid=1000, + started_after_boot=1120, + process_name="blarpy", + ) + + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=100, + jiffies=10, + boot_time=0, + ) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() - expected_messages = [{"add-processes": [ - {"gid": 1000, - "name": u"blarpy", - "pid": 672, - "start-time": 112, - "state": b"t", - "uid": 1000, - "vm-size": 11676, - "percent-cpu": 0.0}, - {"gid": 0, - "name": u"init", - "pid": 1, - "start-time": 103, - "state": b"R", - "uid": 0, - "vm-size": 11676, - "percent-cpu": 0.0}, - {"gid": 1000, - "name": u"blargh", - "pid": 671, - "start-time": 111, - "state": b"T", - "uid": 1000, - "vm-size": 11676, - "percent-cpu": 0.0}], - "kill-all-processes": True, - "type": "active-process-info"}] + expected_messages = [ + { + "add-processes": [ + { + "gid": 1000, + "name": "blarpy", + "pid": 672, + "start-time": 112, + "state": b"t", + "uid": 1000, + "vm-size": 11676, + "percent-cpu": 0.0, + }, + { + "gid": 0, + "name": "init", + "pid": 1, + "start-time": 103, + "state": b"R", + "uid": 0, + "vm-size": 11676, + "percent-cpu": 0.0, + }, + { + "gid": 1000, + "name": "blargh", + "pid": 671, + "start-time": 111, + "state": b"T", + "uid": 1000, + "vm-size": 11676, + "percent-cpu": 0.0, + }, + ], + "kill-all-processes": True, + "type": "active-process-info", + }, + ] self.assertMessages(messages, expected_messages) @@ -419,8 +654,12 @@ When a 
C{resynchronize} event occurs a new session id is acquired so that future messages can be sent. """ - plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, - jiffies=10, boot_time=0) + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=100, + jiffies=10, + boot_time=0, + ) self.monitor.add(plugin) session_id = plugin._session_id plugin.client.broker.message_store.drop_session_ids() @@ -433,28 +672,44 @@ When a C{resynchronize} event occurs the L{_reset} method should be called on L{ActiveProcessInfo}. """ - self.builder.create_data(672, self.builder.TRACING_STOP, - uid=1000, gid=1000, started_after_boot=1120, - process_name="blarpy") - - plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, - jiffies=10, boot_time=0) + self.builder.create_data( + 672, + self.builder.TRACING_STOP, + uid=1000, + gid=1000, + started_after_boot=1120, + process_name="blarpy", + ) + + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=100, + jiffies=10, + boot_time=0, + ) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() - expected_messages = [{"add-processes": [ - {"gid": 1000, - "name": u"blarpy", - "pid": 672, - "start-time": 112, - "state": b"t", - "uid": 1000, - "vm-size": 11676, - "percent-cpu": 0.0}], - "kill-all-processes": True, - "type": "active-process-info"}] + expected_messages = [ + { + "add-processes": [ + { + "gid": 1000, + "name": "blarpy", + "pid": 672, + "start-time": 112, + "state": b"t", + "uid": 1000, + "vm-size": 11676, + "percent-cpu": 0.0, + }, + ], + "kill-all-processes": True, + "type": "active-process-info", + }, + ] self.assertMessages(messages, expected_messages) @@ -476,28 +731,44 @@ When a C{resynchronize} event occurs, with an irrelevant scope, we should do nothing. 
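The resynchronisation tests here pin down the scoping rules: an event carrying the plugin's own scope (or no scope at all) resets it and renews its session id, while an irrelevant scope, exercised in the test that follows, leaves its state alone. A condensed sketch of that dispatch, with names assumed from the tests:

    def on_resynchronize(plugin, scopes=None):
        # A global event (scopes=None) or a matching scope resets the
        # plugin; any other scope is ignored.
        if scopes is None or plugin.scope in scopes:
            plugin._reset()
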
""" - self.builder.create_data(672, self.builder.TRACING_STOP, - uid=1000, gid=1000, started_after_boot=1120, - process_name="blarpy") - - plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, - jiffies=10, boot_time=0) + self.builder.create_data( + 672, + self.builder.TRACING_STOP, + uid=1000, + gid=1000, + started_after_boot=1120, + process_name="blarpy", + ) + + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=100, + jiffies=10, + boot_time=0, + ) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() - expected_messages = [{"add-processes": [ - {"gid": 1000, - "name": u"blarpy", - "pid": 672, - "start-time": 112, - "state": b"t", - "uid": 1000, - "vm-size": 11676, - "percent-cpu": 0.0}], - "kill-all-processes": True, - "type": "active-process-info"}] + expected_messages = [ + { + "add-processes": [ + { + "gid": 1000, + "name": "blarpy", + "pid": 672, + "start-time": 112, + "state": b"t", + "uid": 1000, + "vm-size": 11676, + "percent-cpu": 0.0, + }, + ], + "kill-all-processes": True, + "type": "active-process-info", + }, + ] self.assertMessages(messages, expected_messages) @@ -527,14 +798,20 @@ self.log_helper.ignore_errors(MyException) - self.builder.create_data(672, self.builder.RUNNING, - uid=1000, gid=1000, started_after_boot=10, - process_name="python") + self.builder.create_data( + 672, + self.builder.RUNNING, + uid=1000, + gid=1000, + started_after_boot=10, + process_name="python", + ) plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=10) self.monitor.add(plugin) self.monitor.broker.send_message = Mock( - return_value=fail(MyException())) + return_value=fail(MyException()), + ) message = plugin.get_message() @@ -544,19 +821,32 @@ result = plugin.exchange() result.addCallback(assert_message) self.monitor.broker.send_message.assert_called_once_with( - ANY, ANY, urgent=ANY) + ANY, + ANY, + urgent=ANY, + ) return result def test_process_updates(self): """Test updates to processes are successfully reported.""" - self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, - started_after_boot=1100, process_name="init",) - - plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, - jiffies=10, boot_time=0) + self.builder.create_data( + 1, + self.builder.RUNNING, + uid=0, + gid=0, + started_after_boot=1100, + process_name="init", + ) + + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=100, + jiffies=10, + boot_time=0, + ) self.monitor.add(plugin) - with patch.object(plugin.registry, 'flush') as flush_mock: + with patch.object(plugin.registry, "flush") as flush_mock: plugin.exchange() flush_mock.assert_called_once_with() @@ -566,38 +856,60 @@ self.assertEqual(len(messages), 1) self.builder.remove_data(1) - self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, - started_after_boot=1100, - process_name="init", vmsize=20000) + self.builder.create_data( + 1, + self.builder.RUNNING, + uid=0, + gid=0, + started_after_boot=1100, + process_name="init", + vmsize=20000, + ) plugin.exchange() flush_mock.assert_called_once_with() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) - self.assertMessages(messages, [{"timestamp": 0, - "api": b"3.2", - "type": "active-process-info", - "kill-all-processes": True, - "add-processes": [{"start-time": 110, - "name": u"init", - "pid": 1, - "percent-cpu": 0.0, - "state": b"R", - "gid": 0, - "vm-size": 11676, - "uid": 0}]}, - {"timestamp": 0, - "api": b"3.2", - "type": "active-process-info", - "update-processes": [ - 
{"start-time": 110, - "name": u"init", - "pid": 1, - "percent-cpu": 0.0, - "state": b"R", - "gid": 0, - "vm-size": 20000, - "uid": 0}]}]) + self.assertMessages( + messages, + [ + { + "timestamp": 0, + "api": b"3.2", + "type": "active-process-info", + "kill-all-processes": True, + "add-processes": [ + { + "start-time": 110, + "name": "init", + "pid": 1, + "percent-cpu": 0.0, + "state": b"R", + "gid": 0, + "vm-size": 11676, + "uid": 0, + }, + ], + }, + { + "timestamp": 0, + "api": b"3.2", + "type": "active-process-info", + "update-processes": [ + { + "start-time": 110, + "name": "init", + "pid": 1, + "percent-cpu": 0.0, + "state": b"R", + "gid": 0, + "vm-size": 20000, + "uid": 0, + }, + ], + }, + ], + ) class PluginManagerIntegrationTest(LandscapeTest): @@ -608,8 +920,9 @@ LandscapeTest.setUp(self) self.sample_dir = self.makeDir() self.builder = ProcessDataBuilder(self.sample_dir) - self.mstore.set_accepted_types(["active-process-info", - "operation-result"]) + self.mstore.set_accepted_types( + ["active-process-info", "operation-result"], + ) def get_missing_pid(self): popen = subprocess.Popen(["hostname"], stdout=subprocess.PIPE) @@ -617,62 +930,106 @@ return popen.pid def get_active_process(self): - return subprocess.Popen(["python", "-c", "raw_input()"], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + return subprocess.Popen( + ["python", "-c", "raw_input()"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) def test_read_long_process_name(self): """Test reading a process with a long name.""" - self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, - started_after_boot=1030, - process_name="NetworkManagerDaemon") - - plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=2000, - jiffies=10, boot_time=0) + self.builder.create_data( + 1, + self.builder.RUNNING, + uid=0, + gid=0, + started_after_boot=1030, + process_name="NetworkManagerDaemon", + ) + + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=2000, + jiffies=10, + boot_time=0, + ) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "active-process-info") self.assertTrue("kill-all-processes" in message) self.assertTrue("add-processes" in message) - expected_process_0 = {"state": b"R", "gid": 0, "pid": 1, - "vm-size": 11676, "name": "NetworkManagerDaemon", - "uid": 0, "start-time": 103, "percent-cpu": 0.0} + expected_process_0 = { + "state": b"R", + "gid": 0, + "pid": 1, + "vm-size": 11676, + "name": "NetworkManagerDaemon", + "uid": 0, + "start-time": 103, + "percent-cpu": 0.0, + } processes = message["add-processes"] self.assertEqual(processes, [expected_process_0]) def test_strip_command_line_name_whitespace(self): """Whitespace should be stripped from command-line names.""" - self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, - started_after_boot=30, - process_name=" postgres: writer process ") - plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, - jiffies=10) + self.builder.create_data( + 1, + self.builder.RUNNING, + uid=0, + gid=0, + started_after_boot=30, + process_name=" postgres: writer process ", + ) + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=100, + jiffies=10, + ) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] - self.assertEqual(message["add-processes"][0]["name"], - u"postgres: writer process") + self.assertEqual( + message["add-processes"][0]["name"], + "postgres: 
writer process", + ) def test_read_process_with_no_cmdline(self): """Test reading a process without a cmdline file.""" - self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, - started_after_boot=1030, - process_name="ProcessWithLongName", - generate_cmd_line=False) - - plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, - jiffies=10, boot_time=0) + self.builder.create_data( + 1, + self.builder.RUNNING, + uid=0, + gid=0, + started_after_boot=1030, + process_name="ProcessWithLongName", + generate_cmd_line=False, + ) + + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=100, + jiffies=10, + boot_time=0, + ) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "active-process-info") self.assertTrue("kill-all-processes" in message) self.assertTrue("add-processes" in message) - expected_process_0 = {"state": b"R", "gid": 0, "pid": 1, - "vm-size": 11676, "name": "ProcessWithLong", - "uid": 0, "start-time": 103, "percent-cpu": 0.0} + expected_process_0 = { + "state": b"R", + "gid": 0, + "pid": 1, + "vm-size": 11676, + "name": "ProcessWithLong", + "uid": 0, + "start-time": 103, + "percent-cpu": 0.0, + } processes = message["add-processes"] self.assertEqual(processes, [expected_process_0]) @@ -681,17 +1038,28 @@ Test that we can calculate the CPU usage from system information and the /proc//stat file. """ - stat_data = "1 Process S 1 0 0 0 0 0 0 0 " \ - "0 0 20 20 0 0 0 0 0 0 3000 0 " \ - "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0" - - self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, - started_after_boot=None, - process_name="Process", - generate_cmd_line=False, - stat_data=stat_data) - plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=400, - jiffies=10, boot_time=0) + stat_data = ( + "1 Process S 1 0 0 0 0 0 0 0 " + "0 0 20 20 0 0 0 0 0 0 3000 0 " + "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0" + ) + + self.builder.create_data( + 1, + self.builder.RUNNING, + uid=0, + gid=0, + started_after_boot=None, + process_name="Process", + generate_cmd_line=False, + stat_data=stat_data, + ) + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=400, + jiffies=10, + boot_time=0, + ) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] @@ -699,10 +1067,16 @@ self.assertTrue("kill-all-processes" in message) self.assertTrue("add-processes" in message) processes = message["add-processes"] - expected_process_0 = {"state": b"R", "gid": 0, "pid": 1, - "vm-size": 11676, "name": u"Process", - "uid": 0, "start-time": 300, - "percent-cpu": 4.00} + expected_process_0 = { + "state": b"R", + "gid": 0, + "pid": 1, + "vm-size": 11676, + "name": "Process", + "uid": 0, + "start-time": 300, + "percent-cpu": 4.00, + } processes = message["add-processes"] self.assertEqual(processes, [expected_process_0]) @@ -712,17 +1086,28 @@ the /proc//stat file, the CPU usage should be capped at 99%. 
""" - stat_data = "1 Process S 1 0 0 0 0 0 0 0 " \ - "0 0 500 500 0 0 0 0 0 0 3000 0 " \ - "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0" - - self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, - started_after_boot=None, - process_name="Process", - generate_cmd_line=False, - stat_data=stat_data) - plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=400, - jiffies=10, boot_time=0) + stat_data = ( + "1 Process S 1 0 0 0 0 0 0 0 " + "0 0 500 500 0 0 0 0 0 0 3000 0 " + "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0" + ) + + self.builder.create_data( + 1, + self.builder.RUNNING, + uid=0, + gid=0, + started_after_boot=None, + process_name="Process", + generate_cmd_line=False, + stat_data=stat_data, + ) + plugin = ActiveProcessInfo( + proc_dir=self.sample_dir, + uptime=400, + jiffies=10, + boot_time=0, + ) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] @@ -730,9 +1115,15 @@ self.assertTrue("kill-all-processes" in message) self.assertTrue("add-processes" in message) processes = message["add-processes"] - expected_process_0 = {"state": b"R", "gid": 0, "pid": 1, - "vm-size": 11676, "name": u"Process", - "uid": 0, "start-time": 300, - "percent-cpu": 99.00} + expected_process_0 = { + "state": b"R", + "gid": 0, + "pid": 1, + "vm-size": 11676, + "name": "Process", + "uid": 0, + "start-time": 300, + "percent-cpu": 99.00, + } processes = message["add-processes"] self.assertEqual(processes, [expected_process_0]) diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_aptpreferences.py landscape-client-23.08/landscape/client/monitor/tests/test_aptpreferences.py --- landscape-client-23.02/landscape/client/monitor/tests/test_aptpreferences.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_aptpreferences.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,5 @@ import os -import mock +from unittest import mock from twisted.python.compat import unicode @@ -13,7 +13,7 @@ helpers = [MonitorHelper] def setUp(self): - super(AptPreferencesTest, self).setUp() + super().setUp() self.etc_apt_directory = self.makeDir() self.plugin = AptPreferences(self.etc_apt_directory) self.monitor.add(self.plugin) @@ -30,19 +30,25 @@ L{AptPreferences.get_data} includes the contents of the main APT preferences file. """ - preferences_filename = os.path.join(self.etc_apt_directory, - "preferences") + preferences_filename = os.path.join( + self.etc_apt_directory, + "preferences", + ) self.makeFile(path=preferences_filename, content="crap") - self.assertEqual(self.plugin.get_data(), - {preferences_filename: "crap"}) + self.assertEqual( + self.plugin.get_data(), + {preferences_filename: "crap"}, + ) def test_get_data_with_empty_preferences_directory(self): """ L{AptPreferences.get_data} returns C{None} if the APT preference directory is present but empty. """ - preferences_directory = os.path.join(self.etc_apt_directory, - "preferences.d") + preferences_directory = os.path.join( + self.etc_apt_directory, + "preferences.d", + ) self.makeDir(path=preferences_directory) self.assertIdentical(self.plugin.get_data(), None) @@ -51,42 +57,60 @@ L{AptPreferences.get_data} includes the contents of all the file in the APT preferences directory. 
""" - preferences_directory = os.path.join(self.etc_apt_directory, - "preferences.d") + preferences_directory = os.path.join( + self.etc_apt_directory, + "preferences.d", + ) self.makeDir(path=preferences_directory) filename1 = self.makeFile(dirname=preferences_directory, content="foo") filename2 = self.makeFile(dirname=preferences_directory, content="bar") - self.assertEqual(self.plugin.get_data(), {filename1: "foo", - filename2: "bar"}) + self.assertEqual( + self.plugin.get_data(), + {filename1: "foo", filename2: "bar"}, + ) def test_get_data_with_one_big_file(self): """ L{AptPreferences.get_data} truncates the contents of an APT preferences files bigger than the size limit. """ - preferences_filename = os.path.join(self.etc_apt_directory, - "preferences") + preferences_filename = os.path.join( + self.etc_apt_directory, + "preferences", + ) limit = self.plugin.size_limit self.makeFile(path=preferences_filename, content="a" * (limit + 1)) - self.assertEqual(self.plugin.get_data(), { - preferences_filename: "a" * (limit - len(preferences_filename))}) + self.assertEqual( + self.plugin.get_data(), + {preferences_filename: "a" * (limit - len(preferences_filename))}, + ) def test_get_data_with_many_big_files(self): """ L{AptPreferences.get_data} truncates the contents of individual APT preferences files in the total size is bigger than the size limit. """ - preferences_directory = os.path.join(self.etc_apt_directory, - "preferences.d") + preferences_directory = os.path.join( + self.etc_apt_directory, + "preferences.d", + ) self.makeDir(path=preferences_directory) limit = self.plugin.size_limit - filename1 = self.makeFile(dirname=preferences_directory, - content="a" * (limit // 2)) - filename2 = self.makeFile(dirname=preferences_directory, - content="b" * (limit // 2)) - self.assertEqual(self.plugin.get_data(), - {filename1: "a" * (limit // 2 - len(filename1)), - filename2: "b" * (limit // 2 - len(filename2))}) + filename1 = self.makeFile( + dirname=preferences_directory, + content="a" * (limit // 2), + ) + filename2 = self.makeFile( + dirname=preferences_directory, + content="b" * (limit // 2), + ) + self.assertEqual( + self.plugin.get_data(), + { + filename1: "a" * (limit // 2 - len(filename1)), + filename2: "b" * (limit // 2 - len(filename2)), + }, + ) def test_exchange_without_apt_preferences_data(self): """ @@ -103,20 +127,30 @@ further message with the C{data} field set to C{None} is sent. 
""" self.mstore.set_accepted_types(["apt-preferences"]) - main_preferences_filename = os.path.join(self.etc_apt_directory, - "preferences") + main_preferences_filename = os.path.join( + self.etc_apt_directory, + "preferences", + ) self.makeFile(path=main_preferences_filename, content="crap") - preferences_directory = os.path.join(self.etc_apt_directory, - "preferences.d") + preferences_directory = os.path.join( + self.etc_apt_directory, + "preferences.d", + ) self.makeDir(path=preferences_directory) - sub_preferences_filename = self.makeFile(dirname=preferences_directory, - content="foo") + sub_preferences_filename = self.makeFile( + dirname=preferences_directory, + content="foo", + ) self.plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(messages[0]["type"], "apt-preferences") - self.assertEqual(messages[0]["data"], - {main_preferences_filename: u"crap", - sub_preferences_filename: u"foo"}) + self.assertEqual( + messages[0]["data"], + { + main_preferences_filename: "crap", + sub_preferences_filename: "foo", + }, + ) for filename in messages[0]["data"]: self.assertTrue(isinstance(filename, unicode)) @@ -135,8 +169,10 @@ further message with the C{data} field set to C{None} is sent. """ self.mstore.set_accepted_types(["apt-preferences"]) - preferences_filename = os.path.join(self.etc_apt_directory, - "preferences") + preferences_filename = os.path.join( + self.etc_apt_directory, + "preferences", + ) self.makeFile(path=preferences_filename, content="crap") self.plugin.exchange() messages = self.mstore.get_pending_messages() @@ -151,8 +187,10 @@ """ self.mstore.set_accepted_types(["apt-preferences"]) - preferences_filename = os.path.join(self.etc_apt_directory, - "preferences") + preferences_filename = os.path.join( + self.etc_apt_directory, + "preferences", + ) self.makeFile(path=preferences_filename, content="crap") with mock.patch.object(self.remote, "send_message"): @@ -160,15 +198,20 @@ self.mstore.set_accepted_types([]) self.plugin.run() self.remote.send_message.assert_called_once_with( - mock.ANY, mock.ANY, urgent=True) + mock.ANY, + mock.ANY, + urgent=True, + ) def test_resynchronize(self): """ The "resynchronize" reactor message cause the plugin to send fresh data. 
""" - preferences_filename = os.path.join(self.etc_apt_directory, - "preferences") + preferences_filename = os.path.join( + self.etc_apt_directory, + "preferences", + ) self.makeFile(path=preferences_filename, content="crap") self.mstore.set_accepted_types(["apt-preferences"]) self.plugin.run() diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_cephusage.py landscape-client-23.08/landscape/client/monitor/tests/test_cephusage.py --- landscape-client-23.02/landscape/client/monitor/tests/test_cephusage.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_cephusage.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,14 +1,16 @@ -import mock -from landscape.lib.fs import touch_file -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper +from unittest import mock + from landscape.client.monitor.cephusage import CephUsage +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper +from landscape.lib.fs import touch_file class CephUsagePluginTest(LandscapeTest): helpers = [MonitorHelper] def setUp(self): - super(CephUsagePluginTest, self).setUp() + super().setUp() self.mstore = self.broker_service.message_store self.plugin = CephUsage(create_time=self.reactor.time) @@ -40,10 +42,15 @@ self.monitor.exchange() self.assertMessages( self.mstore.get_pending_messages(), - [{"type": "ceph-usage", - "ring-id": ring_id, - "ceph-usages": [], - "data-points": [point]}]) + [ + { + "type": "ceph-usage", + "ring-id": ring_id, + "ceph-usages": [], + "data-points": [point], + }, + ], + ) def test_create_message(self): """ @@ -81,8 +88,10 @@ monitor_interval = 300 plugin = CephUsage( - interval=interval, monitor_interval=monitor_interval, - create_time=self.reactor.time) + interval=interval, + monitor_interval=monitor_interval, + create_time=self.reactor.time, + ) self.monitor.add(plugin) @@ -120,7 +129,8 @@ self.assertFalse(plugin._should_run()) logging.assert_called_once_with( "This machine does not appear to be a Ceph machine. 
" - "Deactivating plugin.") + "Deactivating plugin.", + ) def test_wb_should_run(self): """ @@ -144,8 +154,10 @@ stats = {"kb": 10240, "kb_avail": 8192, "kb_used": 2048} plugin = CephUsage( - create_time=self.reactor.time, interval=interval, - monitor_interval=interval) + create_time=self.reactor.time, + interval=interval, + monitor_interval=interval, + ) self.monitor.add(plugin) @@ -155,7 +167,9 @@ plugin._handle_usage(stats) self.assertEqual( - [(300, 10485760, 8388608, 2097152)], plugin._ceph_usage_points) + [(300, 10485760, 8388608, 2097152)], + plugin._ceph_usage_points, + ) def test_resynchronize_message_calls_reset_method(self): """ diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_computerinfo.py landscape-client-23.08/landscape/client/monitor/tests/test_computerinfo.py --- landscape-client-23.02/landscape/client/monitor/tests/test_computerinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_computerinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,19 +1,25 @@ -import mock import os import re +from unittest import mock -from twisted.internet.defer import succeed, fail, inlineCallbacks - -from landscape.lib.fetch import HTTPCodeError, PyCurlError +from twisted.internet.defer import fail +from twisted.internet.defer import inlineCallbacks +from twisted.internet.defer import succeed + +from landscape.client.monitor.computerinfo import ComputerInfo +from landscape.client.monitor.computerinfo import METADATA_RETRY_MAX +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper +from landscape.lib.fetch import HTTPCodeError +from landscape.lib.fetch import PyCurlError from landscape.lib.fs import create_text_file -from landscape.client.monitor.computerinfo import ( - ComputerInfo, METADATA_RETRY_MAX) -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper - -SAMPLE_LSB_RELEASE = "DISTRIB_ID=Ubuntu\n" \ - "DISTRIB_RELEASE=6.06\n" \ - "DISTRIB_CODENAME=dapper\n" \ - "DISTRIB_DESCRIPTION=\"Ubuntu 6.06.1 LTS\"\n" + +SAMPLE_LSB_RELEASE = ( + "DISTRIB_ID=Ubuntu\n" + "DISTRIB_RELEASE=6.06\n" + "DISTRIB_CODENAME=dapper\n" + 'DISTRIB_DESCRIPTION="Ubuntu 6.06.1 LTS"\n' +) def get_fqdn(): @@ -109,7 +115,6 @@ self.assertEqual(len(messages), 1) def test_report_changed_hostnames(self): - def hostname_factory(hostnames=["ooga", "wubble", "wubble"]): i = 0 while i < len(hostnames): @@ -118,8 +123,10 @@ self.mstore.set_accepted_types(["computer-info"]) hostname_fact = hostname_factory() - plugin = ComputerInfo(get_fqdn=lambda: next(hostname_fact), - fetch_async=self.fetch_func) + plugin = ComputerInfo( + get_fqdn=lambda: next(hostname_fact), + fetch_async=self.fetch_func, + ) self.monitor.add(plugin) plugin.exchange() @@ -135,8 +142,10 @@ def test_get_total_memory(self): self.mstore.set_accepted_types(["computer-info"]) meminfo_filename = self.makeFile(self.sample_memory_info) - plugin = ComputerInfo(meminfo_filename=meminfo_filename, - fetch_async=self.fetch_func) + plugin = ComputerInfo( + meminfo_filename=meminfo_filename, + fetch_async=self.fetch_func, + ) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() @@ -260,12 +269,14 @@ self.assertEqual(message["release"], "6.06") self.assertEqual(message["code-name"], "dapper") - plugin._lsb_release_filename = self.makeFile("""\ + plugin._lsb_release_filename = self.makeFile( + """\ DISTRIB_ID=Ubuntu DISTRIB_RELEASE=6.10 DISTRIB_CODENAME=edgy DISTRIB_DESCRIPTION="Ubuntu 6.10" 
-""") +""", + ) plugin.exchange() message = self.mstore.get_pending_messages()[1] self.assertEqual(message["type"], "distribution-info") @@ -276,13 +287,15 @@ def test_unknown_distribution_key(self): self.mstore.set_accepted_types(["distribution-info"]) - lsb_release_filename = self.makeFile("""\ + lsb_release_filename = self.makeFile( + """\ DISTRIB_ID=Ubuntu DISTRIB_RELEASE=6.10 DISTRIB_CODENAME=edgy DISTRIB_DESCRIPTION="Ubuntu 6.10" DISTRIB_NEW_UNEXPECTED_KEY=ooga -""") +""", + ) plugin = ComputerInfo(lsb_release_filename=lsb_release_filename) self.monitor.add(plugin) @@ -301,25 +314,36 @@ """ self.mstore.set_accepted_types(["distribution-info", "computer-info"]) meminfo_filename = self.makeFile(self.sample_memory_info) - plugin = ComputerInfo(get_fqdn=get_fqdn, - meminfo_filename=meminfo_filename, - lsb_release_filename=self.lsb_release_filename, - root_path=self.makeDir(), - fetch_async=self.fetch_func) + plugin = ComputerInfo( + get_fqdn=get_fqdn, + meminfo_filename=meminfo_filename, + lsb_release_filename=self.lsb_release_filename, + root_path=self.makeDir(), + fetch_async=self.fetch_func, + ) self.monitor.add(plugin) plugin.exchange() self.reactor.fire("resynchronize", scopes=["computer"]) plugin.exchange() - computer_info = {"type": "computer-info", "hostname": "ooga.local", - "timestamp": 0, "total-memory": 1510, - "total-swap": 1584} - - dist_info = {"type": "distribution-info", - "code-name": "dapper", "description": "Ubuntu 6.06.1 LTS", - "distributor-id": "Ubuntu", "release": "6.06"} - self.assertMessages(self.mstore.get_pending_messages(), - [computer_info, dist_info, - computer_info, dist_info]) + computer_info = { + "type": "computer-info", + "hostname": "ooga.local", + "timestamp": 0, + "total-memory": 1510, + "total-swap": 1584, + } + + dist_info = { + "type": "distribution-info", + "code-name": "dapper", + "description": "Ubuntu 6.06.1 LTS", + "distributor-id": "Ubuntu", + "release": "6.06", + } + self.assertMessages( + self.mstore.get_pending_messages(), + [computer_info, dist_info, computer_info, dist_info], + ) def test_computer_info_call_on_accepted(self): plugin = ComputerInfo(fetch_async=self.fetch_func) @@ -327,10 +351,15 @@ self.mstore.set_accepted_types(["computer-info"]) with mock.patch.object(self.remote, "send_message"): - self.reactor.fire(("message-type-acceptance-changed", - "computer-info"), True) + self.reactor.fire( + ("message-type-acceptance-changed", "computer-info"), + True, + ) self.remote.send_message.assert_called_once_with( - mock.ANY, mock.ANY, urgent=True) + mock.ANY, + mock.ANY, + urgent=True, + ) def test_distribution_info_call_on_accepted(self): plugin = ComputerInfo() @@ -339,10 +368,15 @@ self.mstore.set_accepted_types(["distribution-info"]) with mock.patch.object(self.remote, "send_message"): - self.reactor.fire(("message-type-acceptance-changed", - "distribution-info"), True) + self.reactor.fire( + ("message-type-acceptance-changed", "distribution-info"), + True, + ) self.remote.send_message.assert_called_once_with( - mock.ANY, mock.ANY, urgent=True) + mock.ANY, + mock.ANY, + urgent=True, + ) def test_message_if_not_accepted(self): """ @@ -371,9 +405,13 @@ annotations_dir = self.monitor.config.annotations_path os.mkdir(annotations_dir) create_text_file( - os.path.join(annotations_dir, "annotation1"), "value1") + os.path.join(annotations_dir, "annotation1"), + "value1", + ) create_text_file( - os.path.join(annotations_dir, "annotation2"), "value2") + os.path.join(annotations_dir, "annotation2"), + "value2", + ) 
self.mstore.set_accepted_types(["computer-info"]) plugin = ComputerInfo() @@ -400,9 +438,9 @@ plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(1, len(messages)) - self.assertEqual(u"i00001", messages[0]["instance-id"]) - self.assertEqual(u"ami-00002", messages[0]["ami-id"]) - self.assertEqual(u"hs1.8xlarge", messages[0]["instance-type"]) + self.assertEqual("i00001", messages[0]["instance-id"]) + self.assertEqual("ami-00002", messages[0]["ami-id"]) + self.assertEqual("hs1.8xlarge", messages[0]["instance-type"]) def test_send_cloud_instance_metadata_only_once(self): """Only send the cloud information once per client restart.""" @@ -440,12 +478,19 @@ """ plugin = ComputerInfo(fetch_async=self.fetch_func) result = yield plugin._fetch_ec2_meta_data() - self.assertEqual({"instance-id": u"i00001", "ami-id": u"ami-00002", - "instance-type": u"hs1.8xlarge"}, result) + self.assertEqual( + { + "instance-id": "i00001", + "ami-id": "ami-00002", + "instance-type": "hs1.8xlarge", + }, + result, + ) self.assertEqual( " INFO: Querying cloud meta-data.\n" " INFO: Acquired cloud meta-data.\n", - self.logfile.getvalue()) + self.logfile.getvalue(), + ) @inlineCallbacks def test_fetch_ec2_meta_data_no_cloud_api_max_retry(self): @@ -460,8 +505,9 @@ plugin._cloud_retries = METADATA_RETRY_MAX result = yield plugin._fetch_ec2_meta_data() self.assertIn( - "INFO: No cloud meta-data available. " - "Error 60: pycurl error\n", self.logfile.getvalue()) + "INFO: No cloud meta-data available. " "Error 60: pycurl error\n", + self.logfile.getvalue(), + ) self.assertEqual(None, result) @inlineCallbacks @@ -478,7 +524,8 @@ self.assertIn( "INFO: No cloud meta-data available. Server returned " "HTTP code 404", - self.logfile.getvalue()) + self.logfile.getvalue(), + ) self.assertEqual(None, result) @inlineCallbacks @@ -498,6 +545,11 @@ # Fix the error condition for the retry. 
self.add_query_result("ami-id", b"ami-00002") result = yield plugin._fetch_ec2_meta_data() - self.assertEqual({"instance-id": u"i00001", "ami-id": u"ami-00002", - "instance-type": u"hs1.8xlarge"}, - result) + self.assertEqual( + { + "instance-id": "i00001", + "ami-id": "ami-00002", + "instance-type": "hs1.8xlarge", + }, + result, + ) diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_computertags.py landscape-client-23.08/landscape/client/monitor/tests/test_computertags.py --- landscape-client-23.02/landscape/client/monitor/tests/test_computertags.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_computertags.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,6 @@ from landscape.client.monitor.computertags import ComputerTags -from landscape.client.tests.helpers import MonitorHelper, LandscapeTest +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper class ComputerTagsTest(LandscapeTest): @@ -7,8 +8,8 @@ helpers = [MonitorHelper] def setUp(self): - super(ComputerTagsTest, self).setUp() - test_sys_args = ['hello.py'] + super().setUp() + test_sys_args = ["hello.py"] self.plugin = ComputerTags(args=test_sys_args) self.monitor.add(self.plugin) @@ -16,8 +17,8 @@ """ Tags are read from the default config path """ - tags = 'check,linode,profile-test' - file_text = "[client]\ntags = {}".format(tags) + tags = "check,linode,profile-test" + file_text = f"[client]\ntags = {tags}" config_filename = self.config.default_config_filenames[0] self.makeFile(file_text, path=config_filename) self.assertEqual(self.plugin.get_data(), tags) @@ -26,10 +27,10 @@ """ Tags are read from path specified in command line args """ - tags = 'check,linode,profile-test' - file_text = "[client]\ntags = {}".format(tags) + tags = "check,linode,profile-test" + file_text = f"[client]\ntags = {tags}" filename = self.makeFile(file_text) - test_sys_args = ['hello.py', '--config', filename] + test_sys_args = ["hello.py", "--config", filename] self.plugin.args = test_sys_args self.assertEqual(self.plugin.get_data(), tags) @@ -37,22 +38,22 @@ """ Tags message is sent correctly """ - tags = 'check,linode,profile-test' - file_text = "[client]\ntags = {}".format(tags) + tags = "check,linode,profile-test" + file_text = f"[client]\ntags = {tags}" config_filename = self.config.default_config_filenames[0] self.makeFile(file_text, path=config_filename) self.mstore.set_accepted_types(["computer-tags"]) self.plugin.exchange() messages = self.mstore.get_pending_messages() - self.assertEqual(messages[0]['tags'], tags) + self.assertEqual(messages[0]["tags"], tags) def test_invalid_tags(self): """ If invalid tag detected then message contents should be None """ - tags = 'check,lin ode' - file_text = "[client]\ntags = {}".format(tags) + tags = "check,lin ode" + file_text = f"[client]\ntags = {tags}" config_filename = self.config.default_config_filenames[0] self.makeFile(file_text, path=config_filename) self.assertEqual(self.plugin.get_data(), None) diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_computeruptime.py landscape-client-23.08/landscape/client/monitor/tests/test_computeruptime.py --- landscape-client-23.02/landscape/client/monitor/tests/test_computeruptime.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_computeruptime.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,7 +1,10 @@ -from landscape.lib.testing import append_login_data +from 
unittest.mock import ANY +from unittest.mock import Mock + from landscape.client.monitor.computeruptime import ComputerUptime -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper -from mock import ANY, Mock +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper +from landscape.lib.testing import append_login_data class ComputerUptimeTest(LandscapeTest): @@ -16,8 +19,12 @@ def test_deliver_message(self): """Test delivering a message with the boot and shutdown times.""" wtmp_filename = self.makeFile("") - append_login_data(wtmp_filename, tty_device="~", username="shutdown", - entry_time_seconds=535) + append_login_data( + wtmp_filename, + tty_device="~", + username="shutdown", + entry_time_seconds=535, + ) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) plugin.run() @@ -31,8 +38,12 @@ def test_only_deliver_unique_shutdown_messages(self): """Test that only unique shutdown messages are generated.""" wtmp_filename = self.makeFile("") - append_login_data(wtmp_filename, tty_device="~", username="shutdown", - entry_time_seconds=535) + append_login_data( + wtmp_filename, + tty_device="~", + username="shutdown", + entry_time_seconds=535, + ) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) @@ -44,8 +55,12 @@ self.assertTrue("shutdown-times" in message) self.assertEqual(message["shutdown-times"], [535]) - append_login_data(wtmp_filename, tty_device="~", username="shutdown", - entry_time_seconds=3212) + append_login_data( + wtmp_filename, + tty_device="~", + username="shutdown", + entry_time_seconds=3212, + ) plugin.run() message = self.mstore.get_pending_messages()[1] @@ -57,10 +72,18 @@ def test_only_queue_messages_with_data(self): """Test ensures that messages without data are not queued.""" wtmp_filename = self.makeFile("") - append_login_data(wtmp_filename, tty_device="~", username="reboot", - entry_time_seconds=3212) - append_login_data(wtmp_filename, tty_device="~", username="shutdown", - entry_time_seconds=3562) + append_login_data( + wtmp_filename, + tty_device="~", + username="reboot", + entry_time_seconds=3212, + ) + append_login_data( + wtmp_filename, + tty_device="~", + username="shutdown", + entry_time_seconds=3562, + ) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) @@ -80,8 +103,12 @@ def test_boot_time_same_as_last_known_startup_time(self): """Ensure one message is queued for duplicate startup times.""" wtmp_filename = self.makeFile("") - append_login_data(wtmp_filename, tty_device="~", username="reboot", - entry_time_seconds=3212) + append_login_data( + wtmp_filename, + tty_device="~", + username="reboot", + entry_time_seconds=3212, + ) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) plugin.run() @@ -97,16 +124,28 @@ the client. This is simulated by creating a new instance of the plugin. 
""" wtmp_filename = self.makeFile("") - append_login_data(wtmp_filename, tty_device="~", username="reboot", - entry_time_seconds=3212) + append_login_data( + wtmp_filename, + tty_device="~", + username="reboot", + entry_time_seconds=3212, + ) plugin1 = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin1) plugin1.run() - append_login_data(wtmp_filename, tty_device="~", username="shutdown", - entry_time_seconds=3871) - append_login_data(wtmp_filename, tty_device="~", username="reboot", - entry_time_seconds=4657) + append_login_data( + wtmp_filename, + tty_device="~", + username="shutdown", + entry_time_seconds=3871, + ) + append_login_data( + wtmp_filename, + tty_device="~", + username="reboot", + entry_time_seconds=4657, + ) plugin2 = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin2) plugin2.run() @@ -122,10 +161,18 @@ """Test ensures reading falls back to logrotated files.""" wtmp_filename = self.makeFile("") logrotated_filename = self.makeFile("", path=wtmp_filename + ".1") - append_login_data(logrotated_filename, tty_device="~", - username="reboot", entry_time_seconds=125) - append_login_data(logrotated_filename, tty_device="~", - username="shutdown", entry_time_seconds=535) + append_login_data( + logrotated_filename, + tty_device="~", + username="reboot", + entry_time_seconds=125, + ) + append_login_data( + logrotated_filename, + tty_device="~", + username="shutdown", + entry_time_seconds=535, + ) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) @@ -143,14 +190,30 @@ """Test ensures reading falls back to logrotated files.""" wtmp_filename = self.makeFile("") logrotated_filename = self.makeFile("", path=wtmp_filename + ".1") - append_login_data(logrotated_filename, tty_device="~", - username="reboot", entry_time_seconds=125) - append_login_data(logrotated_filename, tty_device="~", - username="shutdown", entry_time_seconds=535) - append_login_data(wtmp_filename, tty_device="~", - username="reboot", entry_time_seconds=1025) - append_login_data(wtmp_filename, tty_device="~", - username="shutdown", entry_time_seconds=1150) + append_login_data( + logrotated_filename, + tty_device="~", + username="reboot", + entry_time_seconds=125, + ) + append_login_data( + logrotated_filename, + tty_device="~", + username="shutdown", + entry_time_seconds=535, + ) + append_login_data( + wtmp_filename, + tty_device="~", + username="reboot", + entry_time_seconds=1025, + ) + append_login_data( + wtmp_filename, + tty_device="~", + username="shutdown", + entry_time_seconds=1150, + ) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) @@ -177,16 +240,21 @@ def test_call_on_accepted(self): wtmp_filename = self.makeFile("") - append_login_data(wtmp_filename, tty_device="~", username="shutdown", - entry_time_seconds=535) + append_login_data( + wtmp_filename, + tty_device="~", + username="shutdown", + entry_time_seconds=535, + ) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) self.remote.send_message = Mock() - self.reactor.fire(("message-type-acceptance-changed", - "computer-uptime"), - True) + self.reactor.fire( + ("message-type-acceptance-changed", "computer-uptime"), + True, + ) self.remote.send_message.assert_called_once_with(ANY, ANY, urgent=True) def test_no_message_if_not_accepted(self): @@ -196,8 +264,12 @@ """ self.mstore.set_accepted_types([]) wtmp_filename = self.makeFile("") - append_login_data(wtmp_filename, tty_device="~", username="shutdown", - entry_time_seconds=535) + append_login_data( + 
wtmp_filename, + tty_device="~", + username="shutdown", + entry_time_seconds=535, + ) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) plugin.run() diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_config.py landscape-client-23.08/landscape/client/monitor/tests/test_config.py --- landscape-client-23.02/landscape/client/monitor/tests/test_config.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_config.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,11 +1,11 @@ +from landscape.client.monitor.config import ALL_PLUGINS +from landscape.client.monitor.config import MonitorConfiguration from landscape.client.tests.helpers import LandscapeTest -from landscape.client.monitor.config import MonitorConfiguration, ALL_PLUGINS class MonitorConfigurationTest(LandscapeTest): - def setUp(self): - super(MonitorConfigurationTest, self).setUp() + super().setUp() self.config = MonitorConfiguration() def test_plugin_factories(self): @@ -21,7 +21,9 @@ """ self.config.load(["--monitor-plugins", " ComputerInfo, LoadAverage "]) self.assertEqual( - self.config.plugin_factories, ["ComputerInfo", "LoadAverage"]) + self.config.plugin_factories, + ["ComputerInfo", "LoadAverage"], + ) def test_flush_interval(self): """ diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_cpuusage.py landscape-client-23.08/landscape/client/monitor/tests/test_cpuusage.py --- landscape-client-23.02/landscape/client/monitor/tests/test_cpuusage.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_cpuusage.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,7 @@ -from landscape.client.monitor.cpuusage import CPUUsage, LAST_MESURE_KEY -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper +from landscape.client.monitor.cpuusage import CPUUsage +from landscape.client.monitor.cpuusage import LAST_MESURE_KEY +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper class CPUUsagePluginTest(LandscapeTest): @@ -199,9 +201,10 @@ self.monitor.exchange() - self.assertMessages(self.mstore.get_pending_messages(), - [{"type": "cpu-usage", - "cpu-usages": [(60, 1.0)]}]) + self.assertMessages( + self.mstore.get_pending_messages(), + [{"type": "cpu-usage", "cpu-usages": [(60, 1.0)]}], + ) def test_no_message_if_not_accepted(self): """ @@ -210,8 +213,7 @@ """ interval = 30 - plugin = CPUUsage(create_time=self.reactor.time, - interval=interval) + plugin = CPUUsage(create_time=self.reactor.time, interval=interval) self.monitor.add(plugin) @@ -238,7 +240,7 @@ self.assertNotEqual([], plugin._cpu_usage_points) self.assertEqual([(300, 1.0), (600, 1.0)], plugin._cpu_usage_points) - def test_plugin_run_with_None(self): + def test_plugin_run_with_None(self): # noqa: N802 """ The plugin's run() method fills in the _cpu_usage_points with accumulated samples after each C{monitor.step_size} period. @@ -266,5 +268,7 @@ # over with the new points. 
plugin._get_cpu_usage = fake_get_cpu_usage
         self.reactor.advance(self.monitor.step_size)
-        self.assertEqual([(300, 1.0), (600, 1.0), (900, 1.0)],
-                         plugin._cpu_usage_points)
+        self.assertEqual(
+            [(300, 1.0), (600, 1.0), (900, 1.0)],
+            plugin._cpu_usage_points,
+        )
diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_livepatch.py landscape-client-23.08/landscape/client/monitor/tests/test_livepatch.py
--- landscape-client-23.02/landscape/client/monitor/tests/test_livepatch.py	1970-01-01 00:00:00.000000000 +0000
+++ landscape-client-23.08/landscape/client/monitor/tests/test_livepatch.py	2023-08-17 21:19:32.000000000 +0000
@@ -0,0 +1,140 @@
+import json
+import yaml
+from unittest import mock
+
+from landscape.client.monitor.livepatch import LivePatch
+from landscape.client.tests.helpers import LandscapeTest, MonitorHelper
+
+
+def subprocess_livepatch_mock(*args, **kwargs):
+    """Mocks json and yaml (humane) output."""
+    data = {'Test': 'test', 'Last-Check': 1, 'Uptime': 1, 'last check': 1}
+    if 'json' in args[0]:
+        output = json.dumps(data)
+    elif 'humane' in args[0]:
+        output = yaml.dump(data)
+    return mock.Mock(stdout=output, stderr="", returncode=0)
+
+
+class LivePatchTest(LandscapeTest):
+    """Livepatch status plugin tests."""
+
+    helpers = [MonitorHelper]
+
+    def setUp(self):
+        super(LivePatchTest, self).setUp()
+        self.mstore.set_accepted_types(["livepatch"])
+
+    def test_livepatch(self):
+        """Tests calling livepatch status."""
+        plugin = LivePatch()
+        self.monitor.add(plugin)
+
+        with mock.patch("subprocess.run") as run_mock:
+            run_mock.side_effect = subprocess_livepatch_mock
+            plugin.exchange()
+
+        messages = self.mstore.get_pending_messages()
+        self.assertTrue(len(messages) > 0)
+        message = json.loads(messages[0]["livepatch"])
+        self.assertEqual(message["json"]["output"]["Test"], "test")
+        self.assertEqual(message["humane"]["output"]["Test"], "test")
+        self.assertEqual(message["json"]["return_code"], 0)
+        self.assertEqual(message["humane"]["return_code"], 0)
+        self.assertFalse(message["humane"]["error"])
+        self.assertFalse(message["json"]["error"])
+
+    def test_livepatch_when_not_installed(self):
+        """Tests calling livepatch when it is not installed."""
+        plugin = LivePatch()
+        self.monitor.add(plugin)
+
+        with mock.patch("subprocess.run") as run_mock:
+            run_mock.side_effect = FileNotFoundError("Not found!")
+            plugin.exchange()
+
+        messages = self.mstore.get_pending_messages()
+        message = json.loads(messages[0]["livepatch"])
+        self.assertTrue(len(messages) > 0)
+        self.assertTrue(message["json"]["error"])
+        self.assertTrue(message["humane"]["error"])
+        self.assertEqual(message["json"]["return_code"], -1)
+        self.assertEqual(message["humane"]["return_code"], -1)
+
+    def test_undefined_exception(self):
+        """Tests calling livepatch when a random exception occurs."""
+        plugin = LivePatch()
+        self.monitor.add(plugin)
+
+        with mock.patch("subprocess.run") as run_mock:
+            run_mock.side_effect = ValueError("Not found!")
+            plugin.exchange()
+
+        messages = self.mstore.get_pending_messages()
+        message = json.loads(messages[0]["livepatch"])
+        self.assertTrue(len(messages) > 0)
+        self.assertTrue(message["json"]["error"])
+        self.assertTrue(message["humane"]["error"])
+        self.assertEqual(message["json"]["return_code"], -2)
+        self.assertEqual(message["humane"]["return_code"], -2)
+
+    def test_yaml_json_parse_error(self):
+        """
+        If json or yaml parsing fails, show the exception and unparsed data
+        """
+        plugin = LivePatch()
+        self.monitor.add(plugin)
+
+        invalid_data = "'"
+        with mock.patch("subprocess.run") as run_mock:
+            run_mock.return_value = mock.Mock(stdout=invalid_data)
+            run_mock.return_value.returncode = 0
+            plugin.exchange()
+
+        messages = self.mstore.get_pending_messages()
+        message = json.loads(messages[0]["livepatch"])
+        self.assertTrue(len(messages) > 0)
+        self.assertTrue(message["json"]["error"])
+        self.assertTrue(message["humane"]["error"])
+        self.assertEqual(message["json"]["output"], invalid_data)
+        self.assertEqual(message["humane"]["output"], invalid_data)
+
+    def test_empty_string(self):
+        """
+        If livepatch is disabled, stdout is an empty string
+        """
+        plugin = LivePatch()
+        self.monitor.add(plugin)
+
+        invalid_data = ""
+        with mock.patch("subprocess.run") as run_mock:
+            run_mock.return_value = mock.Mock(stdout=invalid_data,
+                                              stderr='Error')
+            run_mock.return_value.returncode = 1
+            plugin.exchange()
+
+        messages = self.mstore.get_pending_messages()
+        message = json.loads(messages[0]["livepatch"])
+        self.assertTrue(len(messages) > 0)
+        self.assertTrue(message["json"]["error"])
+        self.assertTrue(message["humane"]["error"])
+        self.assertEqual(message["humane"]["return_code"], 1)
+        self.assertEqual(message["json"]["output"], invalid_data)
+        self.assertEqual(message["humane"]["output"], invalid_data)
+
+    def test_timestamped_fields_deleted(self):
+        """Timestamped fields are deleted so unchanged data isn't re-sent."""
+
+        plugin = LivePatch()
+        self.monitor.add(plugin)
+
+        with mock.patch("subprocess.run") as run_mock:
+            run_mock.side_effect = subprocess_livepatch_mock
+            plugin.exchange()
+
+        messages = self.mstore.get_pending_messages()
+        self.assertTrue(len(messages) > 0)
+        message = json.loads(messages[0]["livepatch"])
+        self.assertNotIn("Uptime", message["json"]["output"])
+        self.assertNotIn("Last-Check", message["json"]["output"])
+        self.assertNotIn("last check", message["humane"]["output"])
diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_loadaverage.py landscape-client-23.08/landscape/client/monitor/tests/test_loadaverage.py
--- landscape-client-23.02/landscape/client/monitor/tests/test_loadaverage.py	2023-02-07 18:55:50.000000000 +0000
+++ landscape-client-23.08/landscape/client/monitor/tests/test_loadaverage.py	2023-08-17 21:19:32.000000000 +0000
@@ -1,7 +1,8 @@
-import mock
+from unittest import mock
 
 from landscape.client.monitor.loadaverage import LoadAverage
-from landscape.client.tests.helpers import LandscapeTest, MonitorHelper
+from landscape.client.tests.helpers import LandscapeTest
+from landscape.client.tests.helpers import MonitorHelper
 
 
 def get_load_average():
@@ -45,9 +46,14 @@
         Sample data is used to ensure that the load average included in
         the message is calculated correctly.
         """
-        get_load_average = (lambda: (0.15, 1, 500))
-        plugin = LoadAverage(create_time=self.reactor.time,
-                             get_load_average=get_load_average)
+
+        def get_load_average():
+            return (0.15, 1, 500)
+
+        plugin = LoadAverage(
+            create_time=self.reactor.time,
+            get_load_average=get_load_average,
+        )
         self.monitor.add(plugin)
 
         self.reactor.advance(self.monitor.step_size)
@@ -64,8 +70,10 @@
         fall on a step boundary.
         """
         _load_averages = get_load_average()
-        plugin = LoadAverage(create_time=self.reactor.time,
-                             get_load_average=lambda: next(_load_averages))
+        plugin = LoadAverage(
+            create_time=self.reactor.time,
+            get_load_average=lambda: next(_load_averages),
+        )
         self.monitor.add(plugin)
 
         for i in range(1, 10):
@@ -82,8 +90,10 @@
         expected.
""" load_averages = get_load_average() - plugin = LoadAverage(create_time=self.reactor.time, - get_load_average=lambda: next(load_averages)) + plugin = LoadAverage( + create_time=self.reactor.time, + get_load_average=lambda: next(load_averages), + ) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) @@ -103,8 +113,10 @@ self.mstore.set_accepted_types(["load-average"]) load_averages = get_load_average() - plugin = LoadAverage(create_time=self.reactor.time, - get_load_average=lambda: next(load_averages)) + plugin = LoadAverage( + create_time=self.reactor.time, + get_load_average=lambda: next(load_averages), + ) self.monitor.add(plugin) self.monitor.exchange() @@ -120,30 +132,45 @@ self.mstore.set_accepted_types(["load-average"]) load_averages = get_load_average() - plugin = LoadAverage(create_time=self.reactor.time, - get_load_average=lambda: next(load_averages)) + plugin = LoadAverage( + create_time=self.reactor.time, + get_load_average=lambda: next(load_averages), + ) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 2) self.monitor.exchange() - self.assertMessages(self.mstore.get_pending_messages(), - [{"type": "load-average", - "load-averages": [(300, 10.5), (600, 30.5)]}]) + self.assertMessages( + self.mstore.get_pending_messages(), + [ + { + "type": "load-average", + "load-averages": [(300, 10.5), (600, 30.5)], + }, + ], + ) def test_call_on_accepted(self): load_averages = get_load_average() - plugin = LoadAverage(create_time=self.reactor.time, - get_load_average=lambda: next(load_averages)) + plugin = LoadAverage( + create_time=self.reactor.time, + get_load_average=lambda: next(load_averages), + ) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 1) with mock.patch.object(self.remote, "send_message"): - self.reactor.fire(("message-type-acceptance-changed", - "load-average"), True) + self.reactor.fire( + ("message-type-acceptance-changed", "load-average"), + True, + ) self.remote.send_message.assert_called_once_with( - mock.ANY, mock.ANY, urgent=True) + mock.ANY, + mock.ANY, + urgent=True, + ) def test_no_message_if_not_accepted(self): """ @@ -151,8 +178,10 @@ accepting their type. """ load_averages = get_load_average() - plugin = LoadAverage(create_time=self.reactor.time, - get_load_average=lambda: next(load_averages)) + plugin = LoadAverage( + create_time=self.reactor.time, + get_load_average=lambda: next(load_averages), + ) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 2) diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_memoryinfo.py landscape-client-23.08/landscape/client/monitor/tests/test_memoryinfo.py --- landscape-client-23.02/landscape/client/monitor/tests/test_memoryinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_memoryinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,7 +1,8 @@ -import mock +from unittest import mock from landscape.client.monitor.memoryinfo import MemoryInfo -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper class MemoryInfoTest(LandscapeTest): @@ -35,7 +36,7 @@ """ def setUp(self): - super(MemoryInfoTest, self).setUp() + super().setUp() def test_read_proc_meminfo(self): """ @@ -68,8 +69,10 @@ messages contain expected free memory and free swap values. 
""" filename = self.makeFile(self.SAMPLE_DATA) - plugin = MemoryInfo(source_filename=filename, - create_time=self.reactor.time) + plugin = MemoryInfo( + source_filename=filename, + create_time=self.reactor.time, + ) step_size = self.monitor.step_size self.monitor.add(plugin) @@ -85,8 +88,10 @@ expected. """ filename = self.makeFile(self.SAMPLE_DATA) - plugin = MemoryInfo(source_filename=filename, - create_time=self.reactor.time) + plugin = MemoryInfo( + source_filename=filename, + create_time=self.reactor.time, + ) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) @@ -104,8 +109,10 @@ fall on a step boundary. """ filename = self.makeFile(self.SAMPLE_DATA) - plugin = MemoryInfo(source_filename=filename, - create_time=self.reactor.time) + plugin = MemoryInfo( + source_filename=filename, + create_time=self.reactor.time, + ) self.monitor.add(plugin) step_size = self.monitor.step_size @@ -125,8 +132,10 @@ self.mstore.set_accepted_types(["memory-info"]) filename = self.makeFile(self.SAMPLE_DATA) - plugin = MemoryInfo(source_filename=filename, - create_time=self.reactor.time) + plugin = MemoryInfo( + source_filename=filename, + create_time=self.reactor.time, + ) self.monitor.add(plugin) self.monitor.exchange() self.assertEqual(len(self.mstore.get_pending_messages()), 0) @@ -141,31 +150,48 @@ self.mstore.set_accepted_types(["memory-info"]) filename = self.makeFile(self.SAMPLE_DATA) - plugin = MemoryInfo(source_filename=filename, - create_time=self.reactor.time) + plugin = MemoryInfo( + source_filename=filename, + create_time=self.reactor.time, + ) step_size = self.monitor.step_size self.monitor.add(plugin) self.reactor.advance(step_size * 2) self.monitor.exchange() - self.assertMessages(self.mstore.get_pending_messages(), - [{"type": "memory-info", - "memory-info": [(step_size, 852, 1567), - (step_size * 2, 852, 1567)]}]) + self.assertMessages( + self.mstore.get_pending_messages(), + [ + { + "type": "memory-info", + "memory-info": [ + (step_size, 852, 1567), + (step_size * 2, 852, 1567), + ], + }, + ], + ) def test_call_on_accepted(self): - plugin = MemoryInfo(source_filename=self.makeFile(self.SAMPLE_DATA), - create_time=self.reactor.time) + plugin = MemoryInfo( + source_filename=self.makeFile(self.SAMPLE_DATA), + create_time=self.reactor.time, + ) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 1) with mock.patch.object(self.remote, "send_message"): - self.reactor.fire(("message-type-acceptance-changed", - "memory-info"), True) + self.reactor.fire( + ("message-type-acceptance-changed", "memory-info"), + True, + ) self.remote.send_message.assert_called_once_with( - mock.ANY, mock.ANY, urgent=True) + mock.ANY, + mock.ANY, + urgent=True, + ) def test_no_message_if_not_accepted(self): """ diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_monitor.py landscape-client-23.08/landscape/client/monitor/tests/test_monitor.py --- landscape-client-23.02/landscape/client/monitor/tests/test_monitor.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_monitor.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,9 +1,10 @@ -from mock import Mock +from unittest.mock import Mock +from landscape.client.broker.client import BrokerClientPlugin from landscape.client.monitor.monitor import Monitor +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper from landscape.lib.persist import Persist -from landscape.client.tests.helpers import LandscapeTest, 
MonitorHelper -from landscape.client.broker.client import BrokerClientPlugin class MonitorTest(LandscapeTest): @@ -51,7 +52,8 @@ self.monitor.persist.save = Mock() self.reactor.advance(self.config.flush_interval * 3) self.monitor.persist.save.assert_called_with( - self.monitor.persist_filename) + self.monitor.persist_filename, + ) self.assertEqual(self.monitor.persist.save.call_count, 3) def test_creating_loads_persist(self): @@ -64,6 +66,10 @@ persist.set("a", "Hi there!") persist.save(filename) - monitor = Monitor(self.reactor, self.config, persist=Persist(), - persist_filename=filename) + monitor = Monitor( + self.reactor, + self.config, + persist=Persist(), + persist_filename=filename, + ) self.assertEqual(monitor.persist.get("a"), "Hi there!") diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_mountinfo.py landscape-client-23.08/landscape/client/monitor/tests/test_mountinfo.py --- landscape-client-23.02/landscape/client/monitor/tests/test_mountinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_mountinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,16 +1,18 @@ -import mock import os import tempfile +from unittest import mock -from twisted.python.compat import StringType as basestring from twisted.python.compat import long +from twisted.python.compat import StringType -from landscape.lib.testing import mock_counter from landscape.client.monitor.mountinfo import MountInfo -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper +from landscape.lib.testing import mock_counter -mb = (lambda x: x * 1024 * 1024) +def mb(x): + return x * 1024 * 1024 def statvfs_result_fixture(path): @@ -35,7 +37,8 @@ kwargs["mtab_file"] = self.makeFile("/dev/hda1 / ext3 rw 0 0\n") if "statvfs" not in kwargs: kwargs["statvfs"] = lambda path: os.statvfs_result( - (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) + (0, 0, 0, 0, 0, 0, 0, 0, 0, 0), + ) plugin = MountInfo(*args, **kwargs) # To make sure tests are isolated from the real system by default. plugin.is_device_removable = lambda x: False @@ -61,28 +64,32 @@ self.assertTrue("mount-info" in message) self.assertTrue(len(message["mount-info"]) > 0) - keys = set(["filesystem", "total-space", "device", "mount-point"]) + keys = {"filesystem", "total-space", "device", "mount-point"} for now, mount_info in message["mount-info"]: self.assertEqual(set(mount_info.keys()), keys) - self.assertTrue(isinstance(mount_info["filesystem"], basestring)) - self.assertTrue(isinstance(mount_info["device"], basestring)) + self.assertTrue(isinstance(mount_info["filesystem"], StringType)) + self.assertTrue(isinstance(mount_info["device"], StringType)) self.assertTrue(isinstance(mount_info["total-space"], (int, long))) - self.assertTrue(isinstance(mount_info["mount-point"], basestring)) + self.assertTrue(isinstance(mount_info["mount-point"], StringType)) def test_read_sample_data(self): """ Sample data is used to ensure that the free space included in the message is calculated correctly. 
""" + def statvfs(path): if path == "/": return os.statvfs_result( - (4096, 0, mb(1000), mb(100), 0, 0, 0, 0, 0, 0)) + (4096, 0, mb(1000), mb(100), 0, 0, 0, 0, 0, 0), + ) else: return os.statvfs_result( - (4096, 0, mb(10000), mb(1000), 0, 0, 0, 0, 0, 0)) + (4096, 0, mb(10000), mb(1000), 0, 0, 0, 0, 0, 0), + ) - filename = self.makeFile("""\ + filename = self.makeFile( + """\ rootfs / rootfs rw 0 0 none /dev ramfs rw 0 0 /dev/hda1 / ext3 rw 0 0 @@ -96,9 +103,11 @@ /dev/hde1 /mnt/hde1 reiserfs rw 0 0 /dev/hde1 /mnt/bind reiserfs rw 0 0 /dev/sdb2 /media/Boot\\040OSX hfsplus nls=utf8 0 0 -""") +""", + ) - mtab_filename = self.makeFile("""\ + mtab_filename = self.makeFile( + """\ rootfs / rootfs rw 0 0 none /dev ramfs rw 0 0 /dev/hda1 / ext3 rw 0 0 @@ -112,10 +121,14 @@ /dev/hde1 /mnt/hde1 reiserfs rw 0 0 /dev/hde1 /mnt/bind none rw,bind 0 0 /dev/sdb2 /media/Boot\\040OSX hfsplus rw 0 0 -""") - plugin = self.get_mount_info(mounts_file=filename, statvfs=statvfs, - create_time=self.reactor.time, - mtab_file=mtab_filename) +""", + ) + plugin = self.get_mount_info( + mounts_file=filename, + statvfs=statvfs, + create_time=self.reactor.time, + mtab_file=mtab_filename, + ) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) @@ -127,18 +140,35 @@ self.assertEqual(len(mount_info), 3) - self.assertEqual(mount_info[0][1], - {"device": "/dev/hda1", "mount-point": "/", - "filesystem": "ext3", "total-space": 4096000}) - - self.assertEqual(mount_info[1][1], - {"device": "/dev/hde1", "mount-point": "/mnt/hde1", - "filesystem": "reiserfs", "total-space": 40960000}) + self.assertEqual( + mount_info[0][1], + { + "device": "/dev/hda1", + "mount-point": "/", + "filesystem": "ext3", + "total-space": 4096000, + }, + ) + + self.assertEqual( + mount_info[1][1], + { + "device": "/dev/hde1", + "mount-point": "/mnt/hde1", + "filesystem": "reiserfs", + "total-space": 40960000, + }, + ) self.assertEqual( mount_info[2][1], - {"device": "/dev/sdb2", "mount-point": "/media/Boot OSX", - "filesystem": "hfsplus", "total-space": 40960000}) + { + "device": "/dev/sdb2", + "mount-point": "/media/Boot OSX", + "filesystem": "hfsplus", + "total-space": 40960000, + }, + ) def test_read_changing_total_space(self): """ @@ -154,11 +184,14 @@ def statvfs(path, multiplier=lambda: next(counter)): return os.statvfs_result( - (4096, 0, mb(multiplier() * 1000), mb(100), 0, 0, 0, 0, 0, 0)) + (4096, 0, mb(multiplier() * 1000), mb(100), 0, 0, 0, 0, 0, 0), + ) plugin = self.get_mount_info( - statvfs=statvfs, create_time=self.reactor.time, - interval=self.monitor.step_size) + statvfs=statvfs, + create_time=self.reactor.time, + interval=self.monitor.step_size, + ) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 2) @@ -168,11 +201,19 @@ self.assertEqual(len(mount_info), 2) for i, total_space in enumerate([4096000, 8192000]): - self.assertEqual(mount_info[i][0], - (i + 1) * self.monitor.step_size) - self.assertEqual(mount_info[i][1], - {"device": "/dev/hda1", "filesystem": "ext3", - "mount-point": "/", "total-space": total_space}) + self.assertEqual( + mount_info[i][0], + (i + 1) * self.monitor.step_size, + ) + self.assertEqual( + mount_info[i][1], + { + "device": "/dev/hda1", + "filesystem": "ext3", + "mount-point": "/", + "total-space": total_space, + }, + ) def test_read_disjointed_changing_total_space(self): """ @@ -189,18 +230,25 @@ def statvfs(path, multiplier=lambda: next(counter)): if path == "/": return os.statvfs_result( - (4096, 0, mb(1000), mb(100), 0, 0, 0, 0, 0, 0)) + (4096, 0, mb(1000), mb(100), 0, 
0, 0, 0, 0, 0), + ) return os.statvfs_result( - (4096, 0, mb(multiplier() * 1000), mb(100), 0, 0, 0, 0, 0, 0)) + (4096, 0, mb(multiplier() * 1000), mb(100), 0, 0, 0, 0, 0, 0), + ) - filename = self.makeFile("""\ + filename = self.makeFile( + """\ /dev/hda1 / ext3 rw 0 0 /dev/hde1 /mnt/hde1 ext3 rw 0 0 -""") - plugin = self.get_mount_info(mounts_file=filename, statvfs=statvfs, - create_time=self.reactor.time, - interval=self.monitor.step_size, - mtab_file=filename) +""", + ) + plugin = self.get_mount_info( + mounts_file=filename, + statvfs=statvfs, + create_time=self.reactor.time, + interval=self.monitor.step_size, + mtab_file=filename, + ) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 2) @@ -212,19 +260,37 @@ self.assertEqual(len(mount_info), 3) self.assertEqual(mount_info[0][0], self.monitor.step_size) - self.assertEqual(mount_info[0][1], - {"device": "/dev/hda1", "mount-point": "/", - "filesystem": "ext3", "total-space": 4096000}) + self.assertEqual( + mount_info[0][1], + { + "device": "/dev/hda1", + "mount-point": "/", + "filesystem": "ext3", + "total-space": 4096000, + }, + ) self.assertEqual(mount_info[1][0], self.monitor.step_size) - self.assertEqual(mount_info[1][1], - {"device": "/dev/hde1", "mount-point": "/mnt/hde1", - "filesystem": "ext3", "total-space": 4096000}) + self.assertEqual( + mount_info[1][1], + { + "device": "/dev/hde1", + "mount-point": "/mnt/hde1", + "filesystem": "ext3", + "total-space": 4096000, + }, + ) self.assertEqual(mount_info[2][0], self.monitor.step_size * 2) - self.assertEqual(mount_info[2][1], - {"device": "/dev/hde1", "mount-point": "/mnt/hde1", - "filesystem": "ext3", "total-space": 8192000}) + self.assertEqual( + mount_info[2][1], + { + "device": "/dev/hde1", + "mount-point": "/mnt/hde1", + "filesystem": "ext3", + "total-space": 8192000, + }, + ) def test_exchange_messages(self): """ @@ -234,7 +300,9 @@ delivered in a single message. """ plugin = self.get_mount_info( - statvfs=statvfs_result_fixture, create_time=self.reactor.time) + statvfs=statvfs_result_fixture, + create_time=self.reactor.time, + ) step_size = self.monitor.step_size self.monitor.add(plugin) @@ -262,7 +330,9 @@ available, None will be returned when messages are created. """ plugin = self.get_mount_info( - statvfs=statvfs_result_fixture, create_time=self.reactor.time) + statvfs=statvfs_result_fixture, + create_time=self.reactor.time, + ) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) @@ -280,16 +350,20 @@ really test anything since the current behaviour is to ignore any mount point for which the device doesn't start with /dev. 
""" - filename = self.makeFile("""\ + filename = self.makeFile( + """\ /dev/hdc4 /mm xfs rw 0 0 /mm/ubuntu-mirror /home/dchroot/warty/mirror none bind 0 0 /mm/ubuntu-mirror /home/dchroot/hoary/mirror none bind 0 0 /mm/ubuntu-mirror /home/dchroot/breezy/mirror none bind 0 0 -""") - plugin = self.get_mount_info(mounts_file=filename, - statvfs=statvfs_result_fixture, - create_time=self.reactor.time, - mtab_file=filename) +""", + ) + plugin = self.get_mount_info( + mounts_file=filename, + statvfs=statvfs_result_fixture, + create_time=self.reactor.time, + mtab_file=filename, + ) step_size = self.monitor.step_size self.monitor.add(plugin) @@ -302,9 +376,15 @@ self.assertEqual(len(mount_info), 1) self.assertEqual(mount_info[0][0], step_size) - self.assertEqual(mount_info[0][1], - {"device": "/dev/hdc4", "mount-point": "/mm", - "filesystem": "xfs", "total-space": 4096000}) + self.assertEqual( + mount_info[0][1], + { + "device": "/dev/hdc4", + "mount-point": "/mm", + "filesystem": "xfs", + "total-space": 4096000, + }, + ) def test_ignore_nfs_mounts(self): """ @@ -312,10 +392,12 @@ mount points. """ - filename = self.makeFile("""\ + filename = self.makeFile( + """\ ennui:/data /data nfs rw,v3,rsize=32768,wsize=32768,hard,lock,proto=udp,\ addr=ennui 0 0 -""") +""", + ) plugin = self.get_mount_info(mounts_file=filename, mtab_file=filename) self.monitor.add(plugin) plugin.run() @@ -342,10 +424,13 @@ def statvfs(path, multiplier=lambda: next(counter)): return os.statvfs_result( - (4096, 0, mb(1000), mb(multiplier() * 100), 0, 0, 0, 0, 0, 0)) + (4096, 0, mb(1000), mb(multiplier() * 100), 0, 0, 0, 0, 0, 0), + ) plugin = self.get_mount_info( - statvfs=statvfs, create_time=self.reactor.time) + statvfs=statvfs, + create_time=self.reactor.time, + ) step_size = self.monitor.step_size self.monitor.add(plugin) @@ -377,13 +462,17 @@ Test ensures all expected messages are created and contain the right datatypes. """ - filename = self.makeFile("""\ + filename = self.makeFile( + """\ /dev/hda2 / xfs rw 0 0 -""") - plugin = self.get_mount_info(mounts_file=filename, - statvfs=statvfs_result_fixture, - create_time=self.reactor.time, - mtab_file=filename) +""", + ) + plugin = self.get_mount_info( + mounts_file=filename, + statvfs=statvfs_result_fixture, + create_time=self.reactor.time, + mtab_file=filename, + ) step_size = self.monitor.step_size self.monitor.add(plugin) @@ -392,14 +481,27 @@ messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) - self.assertEqual(messages[0].get("mount-info"), - [(step_size, - {"device": "/dev/hda2", "mount-point": "/", - "filesystem": "xfs", "total-space": 4096000})]) - self.assertEqual(messages[1].get("free-space"), - [(step_size, "/", 409600)]) - self.assertTrue(isinstance(messages[1]["free-space"][0][2], - (int, long))) + self.assertEqual( + messages[0].get("mount-info"), + [ + ( + step_size, + { + "device": "/dev/hda2", + "mount-point": "/", + "filesystem": "xfs", + "total-space": 4096000, + }, + ), + ], + ) + self.assertEqual( + messages[1].get("free-space"), + [(step_size, "/", 409600)], + ) + self.assertTrue( + isinstance(messages[1]["free-space"][0][2], (int, long)), + ) def test_resynchronize(self): """ @@ -407,7 +509,9 @@ should be sent. 
""" plugin = self.get_mount_info( - create_time=self.reactor.time, statvfs=statvfs_result_fixture) + create_time=self.reactor.time, + statvfs=statvfs_result_fixture, + ) self.monitor.add(plugin) plugin.run() @@ -416,13 +520,23 @@ plugin.run() plugin.exchange() messages = self.mstore.get_pending_messages() - messages = [message for message in messages - if message["type"] == "mount-info"] + messages = [ + message for message in messages if message["type"] == "mount-info" + ] expected_message = { "type": "mount-info", - "mount-info": [(0, {"device": "/dev/hda1", "mount-point": "/", - "total-space": 4096000, - "filesystem": "ext3"})]} + "mount-info": [ + ( + 0, + { + "device": "/dev/hda1", + "mount-point": "/", + "total-space": 4096000, + "filesystem": "ext3", + }, + ), + ], + } self.assertMessages(messages, [expected_message, expected_message]) def test_bind_mounts(self): @@ -434,35 +548,56 @@ # From this test data, we expect only two mount points to be returned, # and the other two to be ignored (the rebound /dev/hda2 -> /mnt # mounting) - filename = self.makeFile("""\ + filename = self.makeFile( + """\ /dev/devices/by-uuid/12345567 / ext3 rw 0 0 /dev/hda2 /usr ext3 rw 0 0 /dev/devices/by-uuid/12345567 /mnt ext3 rw 0 0 /dev/devices/by-uuid/12345567 /media/Boot\\040OSX hfsplus rw 0 0 -""") +""", + ) - mtab_filename = self.makeFile("""\ + mtab_filename = self.makeFile( + """\ /dev/hda1 / ext3 rw 0 0 /dev/hda2 /usr ext3 rw 0 0 /opt /mnt none rw,bind 0 0 /opt /media/Boot\\040OSX none rw,bind 0 0 -""") +""", + ) plugin = MountInfo( - mounts_file=filename, create_time=self.reactor.time, - statvfs=statvfs_result_fixture, mtab_file=mtab_filename) + mounts_file=filename, + create_time=self.reactor.time, + statvfs=statvfs_result_fixture, + mtab_file=mtab_filename, + ) self.monitor.add(plugin) plugin.run() message = plugin.create_mount_info_message() - self.assertEqual(message.get("mount-info"), - [(0, {"device": "/dev/devices/by-uuid/12345567", - "mount-point": "/", "total-space": 4096000, - "filesystem": "ext3"}), - (0, {"device": "/dev/hda2", - "mount-point": "/usr", - "total-space": 4096000, - "filesystem": "ext3"}), - ]) + self.assertEqual( + message.get("mount-info"), + [ + ( + 0, + { + "device": "/dev/devices/by-uuid/12345567", + "mount-point": "/", + "total-space": 4096000, + "filesystem": "ext3", + }, + ), + ( + 0, + { + "device": "/dev/hda2", + "mount-point": "/usr", + "total-space": 4096000, + "filesystem": "ext3", + }, + ), + ], + ) def test_no_mtab_file(self): """ @@ -472,32 +607,57 @@ """ # In this test, we expect all mount points to be returned, as we can't # identify any as bind mounts. - filename = self.makeFile("""\ + filename = self.makeFile( + """\ /dev/devices/by-uuid/12345567 / ext3 rw 0 0 /dev/hda2 /usr ext3 rw 0 0 /dev/devices/by-uuid/12345567 /mnt ext3 rw 0 0 -""") +""", + ) # mktemp isn't normally secure, due to race conditions, but in this # case, we don't actually create the file at all. 
mtab_filename = tempfile.mktemp() plugin = MountInfo( - mounts_file=filename, create_time=self.reactor.time, - statvfs=statvfs_result_fixture, mtab_file=mtab_filename) + mounts_file=filename, + create_time=self.reactor.time, + statvfs=statvfs_result_fixture, + mtab_file=mtab_filename, + ) self.monitor.add(plugin) plugin.run() message = plugin.create_mount_info_message() - self.assertEqual(message.get("mount-info"), - [(0, {"device": "/dev/devices/by-uuid/12345567", - "mount-point": "/", "total-space": 4096000, - "filesystem": "ext3"}), - (0, {"device": "/dev/hda2", - "mount-point": "/usr", - "total-space": 4096000, - "filesystem": "ext3"}), - (0, {"device": "/dev/devices/by-uuid/12345567", - "mount-point": "/mnt", - "total-space": 4096000, - "filesystem": "ext3"})]) + self.assertEqual( + message.get("mount-info"), + [ + ( + 0, + { + "device": "/dev/devices/by-uuid/12345567", + "mount-point": "/", + "total-space": 4096000, + "filesystem": "ext3", + }, + ), + ( + 0, + { + "device": "/dev/hda2", + "mount-point": "/usr", + "total-space": 4096000, + "filesystem": "ext3", + }, + ), + ( + 0, + { + "device": "/dev/devices/by-uuid/12345567", + "mount-point": "/mnt", + "total-space": 4096000, + "filesystem": "ext3", + }, + ), + ], + ) def test_no_message_if_not_accepted(self): """ @@ -507,21 +667,28 @@ self.mstore.set_accepted_types([]) # From this test data, we expect only two mount points to be returned, # and the third to be ignored (the rebound /dev/hda2 -> /mnt mounting) - filename = self.makeFile("""\ + filename = self.makeFile( + """\ /dev/devices/by-uuid/12345567 / ext3 rw 0 0 /dev/hda2 /usr ext3 rw 0 0 /dev/devices/by-uuid/12345567 /mnt ext3 rw 0 0 -""") +""", + ) - mtab_filename = self.makeFile("""\ + mtab_filename = self.makeFile( + """\ /dev/hda1 / ext3 rw 0 0 /dev/hda2 /usr ext3 rw 0 0 /opt /mnt none rw,bind 0 0 -""") +""", + ) plugin = MountInfo( - mounts_file=filename, create_time=self.reactor.time, - statvfs=statvfs_result_fixture, mtab_file=mtab_filename) + mounts_file=filename, + create_time=self.reactor.time, + statvfs=statvfs_result_fixture, + mtab_file=mtab_filename, + ) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 2) self.monitor.exchange() @@ -538,9 +705,13 @@ with mock.patch.object(self.remote, "send_message"): self.reactor.fire( ("message-type-acceptance-changed", "mount-info"), - True) + True, + ) self.remote.send_message.assert_called_with( - mock.ANY, mock.ANY, urgent=True) + mock.ANY, + mock.ANY, + urgent=True, + ) self.assertEqual(self.remote.send_message.call_count, 2) def test_persist_timing(self): @@ -551,29 +722,50 @@ didn't get the mount info at all. This test ensures that mount info are only saved when exchange happens. 
""" - filename = self.makeFile("""\ + filename = self.makeFile( + """\ /dev/hda1 / ext3 rw 0 0 -""") +""", + ) plugin = MountInfo( - mounts_file=filename, create_time=self.reactor.time, - statvfs=statvfs_result_fixture, mtab_file=filename) + mounts_file=filename, + create_time=self.reactor.time, + statvfs=statvfs_result_fixture, + mtab_file=filename, + ) self.monitor.add(plugin) plugin.run() message1 = plugin.create_mount_info_message() self.assertEqual( message1.get("mount-info"), - [(0, {"device": "/dev/hda1", - "filesystem": "ext3", - "mount-point": "/", - "total-space": 4096000})]) + [ + ( + 0, + { + "device": "/dev/hda1", + "filesystem": "ext3", + "mount-point": "/", + "total-space": 4096000, + }, + ), + ], + ) plugin.run() message2 = plugin.create_mount_info_message() self.assertEqual( message2.get("mount-info"), - [(0, {"device": "/dev/hda1", - "filesystem": "ext3", - "mount-point": "/", - "total-space": 4096000})]) + [ + ( + 0, + { + "device": "/dev/hda1", + "filesystem": "ext3", + "mount-point": "/", + "total-space": 4096000, + }, + ), + ], + ) # Run again, calling create_mount_info_message purge the information plugin.run() plugin.exchange() @@ -587,7 +779,9 @@ exchange of free-space messages. """ plugin = self.get_mount_info( - statvfs=statvfs_result_fixture, create_time=self.reactor.time) + statvfs=statvfs_result_fixture, + create_time=self.reactor.time, + ) # Limit the test exchange to 5 items. plugin.max_free_space_items_to_exchange = 5 step_size = self.monitor.step_size diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_networkactivity.py landscape-client-23.08/landscape/client/monitor/tests/test_networkactivity.py --- landscape-client-23.02/landscape/client/monitor/tests/test_networkactivity.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_networkactivity.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,6 +1,8 @@ import socket + from landscape.client.monitor.networkactivity import NetworkActivity -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper class NetworkActivityTest(LandscapeTest): @@ -16,28 +18,41 @@ """ def setUp(self): - super(NetworkActivityTest, self).setUp() + super().setUp() self.activity_file = open(self.makeFile(), "w+") self.write_activity() self.plugin = NetworkActivity( network_activity_file=self.activity_file.name, - create_time=self.reactor.time) + create_time=self.reactor.time, + ) self.monitor.add(self.plugin) def tearDown(self): self.activity_file.close() - super(NetworkActivityTest, self).tearDown() + super().tearDown() - def write_activity(self, lo_in=0, lo_out=0, eth0_in=0, eth0_out=0, - extra="", lo_in_p=0, lo_out_p=0, **kw): - kw.update(dict( - lo_in=lo_in, - lo_out=lo_out, - lo_in_p=lo_in_p, - lo_out_p=lo_out_p, - eth0_in=eth0_in, - eth0_out=eth0_out, - extra=extra)) + def write_activity( + self, + lo_in=0, + lo_out=0, + eth0_in=0, + eth0_out=0, + extra="", + lo_in_p=0, + lo_out_p=0, + **kw, + ): + kw.update( + dict( + lo_in=lo_in, + lo_out=lo_out, + lo_in_p=lo_in_p, + lo_out_p=lo_out_p, + eth0_in=eth0_in, + eth0_out=eth0_out, + extra=extra, + ), + ) self.activity_file.seek(0, 0) self.activity_file.truncate() self.activity_file.write(self.stats_template % kw) @@ -58,7 +73,7 @@ # hmmm. 
try to connect anywhere to advance the net stats try: socket.socket().connect(("localhost", 9999)) - except socket.error: + except OSError: pass plugin.run() message = plugin.create_message() @@ -79,8 +94,7 @@ self.assertTrue(message) self.assertTrue("type" in message) self.assertEqual(message["type"], "network-activity") - self.assertEqual(message["activities"][b"lo"], - [(300, 10, 99)]) + self.assertEqual(message["activities"][b"lo"], [(300, 10, 99)]) # Ensure that b"eth0" is not in activities self.assertEqual(len(message["activities"]), 1) @@ -99,8 +113,7 @@ self.assertTrue(message) self.assertTrue("type" in message) self.assertEqual(message["type"], "network-activity") - self.assertEqual(message["activities"][b"lo"], - [(300, 9010, 9099)]) + self.assertEqual(message["activities"][b"lo"], [(300, 9010, 9099)]) # Ensure that b"eth0" is not in activities self.assertEqual(len(message["activities"]), 1) @@ -181,11 +194,18 @@ self.mstore.set_accepted_types([self.plugin.message_type]) self.plugin.exchange() step_size = self.monitor.step_size - self.assertMessages(self.mstore.get_pending_messages(), - [{"type": "network-activity", - "activities": {b"lo": [(step_size, 0, 1000)], - b"eth0": [(step_size, 0, 1000)], - }}]) + self.assertMessages( + self.mstore.get_pending_messages(), + [ + { + "type": "network-activity", + "activities": { + b"lo": [(step_size, 0, 1000)], + b"eth0": [(step_size, 0, 1000)], + }, + }, + ], + ) def test_config(self): """The network activity plugin is enabled by default.""" @@ -203,6 +223,7 @@ for i in range(50): result += row % (i, data, data) return result + for i in range(1, 10): data = i * 1000 self.write_activity(lo_out=data, eth0_out=data, extra=extra(data)) diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_networkdevice.py landscape-client-23.08/landscape/client/monitor/tests/test_networkdevice.py --- landscape-client-23.02/landscape/client/monitor/tests/test_networkdevice.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_networkdevice.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,9 +1,9 @@ -import mock +from unittest import mock -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper -from landscape.lib.network import ( - get_active_device_info) from landscape.client.monitor.networkdevice import NetworkDevice +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper +from landscape.lib.network import get_active_device_info def test_get_active_device_info(): @@ -16,11 +16,12 @@ helpers = [MonitorHelper] def setUp(self): - super(NetworkDeviceTest, self).setUp() + super().setUp() self.plugin = NetworkDevice(test_get_active_device_info) self.monitor.add(self.plugin) self.broker_service.message_store.set_accepted_types( - [self.plugin.message_type]) + [self.plugin.message_type], + ) def test_get_network_device(self): """A message is sent with device info""" diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_packagemonitor.py landscape-client-23.08/landscape/client/monitor/tests/test_packagemonitor.py --- landscape-client-23.02/landscape/client/monitor/tests/test_packagemonitor.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_packagemonitor.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,13 +1,13 @@ import os -import mock +from unittest import mock from twisted.internet.defer import Deferred +from landscape.client.monitor.packagemonitor import PackageMonitor 
+from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper from landscape.lib.apt.package.store import PackageStore - from landscape.lib.testing import EnvironSaverHelper -from landscape.client.monitor.packagemonitor import PackageMonitor -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper class PackageMonitorTest(LandscapeTest): @@ -17,20 +17,20 @@ def setUp(self): """Initialize test helpers and create a sample thermal zone.""" - super(PackageMonitorTest, self).setUp() + super().setUp() self.package_store_filename = self.makeFile() self.package_store = PackageStore(self.package_store_filename) self.package_monitor = PackageMonitor(self.package_store_filename) - def createReporterTask(self): + def createReporterTask(self): # noqa: N802 """ Put a task for the package reported into the package store. """ message = {"type": "package-ids", "ids": [None], "request-id": 1} return self.package_store.add_task("reporter", message) - def assertSingleReporterTask(self, data, task_id): + def assertSingleReporterTask(self, data, task_id): # noqa: N802 """ Check that we have exactly one task, that it contains the right data and that it's ID matches our expectation. @@ -55,14 +55,16 @@ If the package sqlite database file doesn't exist yet, it is created upon message handling. """ - filename = os.path.join(self.broker_service.config.data_path, - "package/database") + filename = os.path.join( + self.broker_service.config.data_path, + "package/database", + ) package_monitor = PackageMonitor() os.unlink(filename) self.assertFalse(os.path.isfile(filename)) self.monitor.add(package_monitor) - with mock.patch.object(package_monitor, 'spawn_reporter') as mocked: + with mock.patch.object(package_monitor, "spawn_reporter") as mocked: message = {"type": "package-ids"} self.monitor.dispatch_message(message) @@ -80,7 +82,7 @@ def test_do_not_spawn_reporter_if_message_not_accepted(self): self.monitor.add(self.package_monitor) - with mock.patch.object(self.package_monitor, 'spawn_reporter') as mkd: + with mock.patch.object(self.package_monitor, "spawn_reporter") as mkd: self.successResultOf(self.package_monitor.run()) self.assertEqual(mkd.mock_calls, []) @@ -97,12 +99,17 @@ run_result_deferred = real_run() return run_result_deferred.chainDeferred(deferred) - with (mock.patch.object(self.package_monitor, 'spawn_reporter') - ) as mock_spawn_reporter: - with mock.patch.object(self.package_monitor, 'run', - side_effect=run_has_run): - (self.broker_service.message_store - ).set_accepted_types(["packages"]) + with ( + mock.patch.object(self.package_monitor, "spawn_reporter") + ) as mock_spawn_reporter: + with mock.patch.object( + self.package_monitor, + "run", + side_effect=run_has_run, + ): + (self.broker_service.message_store).set_accepted_types( + ["packages"], + ) self.monitor.add(self.package_monitor) self.successResultOf(deferred) @@ -110,7 +117,7 @@ def test_spawn_reporter_on_run_if_message_accepted(self): self.broker_service.message_store.set_accepted_types(["packages"]) - with mock.patch.object(self.package_monitor, 'spawn_reporter') as mkd: + with mock.patch.object(self.package_monitor, "spawn_reporter") as mkd: self.monitor.add(self.package_monitor) # We want to ignore calls made as a result of the above line. 
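            # reset_mock() below is the stock unittest.mock idiom for that:
            # it discards the call history recorded so far, so the assertion
            # that follows only counts calls made after this point.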
mkd.reset_mock() @@ -121,7 +128,7 @@ def test_package_ids_handling(self): self.monitor.add(self.package_monitor) - with mock.patch.object(self.package_monitor, 'spawn_reporter'): + with mock.patch.object(self.package_monitor, "spawn_reporter"): message = {"type": "package-ids", "ids": [None], "request-id": 1} self.monitor.dispatch_message(message) task = self.package_store.get_next_task("reporter") @@ -133,7 +140,8 @@ command = self.write_script( self.config, "landscape-package-reporter", - "#!/bin/sh\necho 'I am the reporter!' >&2\n") + "#!/bin/sh\necho 'I am the reporter!' >&2\n", + ) package_monitor = PackageMonitor(self.package_store_filename) self.monitor.add(package_monitor) @@ -150,7 +158,8 @@ self.write_script( self.config, "landscape-package-reporter", - "#!/bin/sh\n/bin/true") + "#!/bin/sh\n/bin/true", + ) package_monitor = PackageMonitor(self.package_store_filename) self.monitor.add(package_monitor) @@ -166,7 +175,8 @@ command = self.write_script( self.config, "landscape-package-reporter", - "#!/bin/sh\necho VAR: $VAR\n") + "#!/bin/sh\necho VAR: $VAR\n", + ) package_monitor = PackageMonitor(self.package_store_filename) self.monitor.add(package_monitor) @@ -186,7 +196,8 @@ command = self.write_script( self.config, "landscape-package-reporter", - "#!/bin/sh\necho OPTIONS: $@\n") + "#!/bin/sh\necho OPTIONS: $@\n", + ) package_monitor = PackageMonitor(self.package_store_filename) self.monitor.add(package_monitor) @@ -201,10 +212,12 @@ return result.addCallback(got_result) def test_call_on_accepted(self): - with mock.patch.object(self.package_monitor, 'spawn_reporter') as mkd: + with mock.patch.object(self.package_monitor, "spawn_reporter") as mkd: self.monitor.add(self.package_monitor) self.monitor.reactor.fire( - ("message-type-acceptance-changed", "packages"), True) + ("message-type-acceptance-changed", "packages"), + True, + ) mkd.assert_called_once_with() @@ -270,7 +283,8 @@ self.write_script( self.config, "landscape-package-reporter", - "#!/bin/sh\necho RUN\n") + "#!/bin/sh\necho RUN\n", + ) cwd = os.getcwd() self.addCleanup(os.chdir, cwd) dir = self.makeDir() diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_plugin.py landscape-client-23.08/landscape/client/monitor/tests/test_plugin.py --- landscape-client-23.02/landscape/client/monitor/tests/test_plugin.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_plugin.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,10 +1,14 @@ -from mock import ANY, Mock, patch - -from landscape.lib.testing import LogKeeperHelper +from unittest.mock import ANY +from unittest.mock import Mock +from unittest.mock import patch + +from landscape.client.monitor.plugin import DataWatcher +from landscape.client.monitor.plugin import MonitorPlugin +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper from landscape.lib.schema import Int +from landscape.lib.testing import LogKeeperHelper from landscape.message_schemas.message import Message -from landscape.client.monitor.plugin import MonitorPlugin, DataWatcher -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper class MonitorPluginTest(LandscapeTest): @@ -60,7 +64,10 @@ """ plugin = MonitorPlugin() plugin.register(self.monitor) - callback = (lambda: 1 / 0) + + def callback(): + return 1 / 0 + plugin.call_on_accepted("type", callback) self.reactor.fire(("message-type-acceptance-changed", "type"), False) @@ -130,12 +137,16 @@ 
self.mstore.add_schema(Message("wubble", {"wubblestuff": Int()})) def test_get_message(self): - self.assertEqual(self.plugin.get_message(), - {"type": "wubble", "wubblestuff": 1}) + self.assertEqual( + self.plugin.get_message(), + {"type": "wubble", "wubblestuff": 1}, + ) def test_get_message_unchanging(self): - self.assertEqual(self.plugin.get_message(), - {"type": "wubble", "wubblestuff": 1}) + self.assertEqual( + self.plugin.get_message(), + {"type": "wubble", "wubblestuff": 1}, + ) self.assertEqual(self.plugin.get_message(), None) def test_basic_exchange(self): @@ -145,9 +156,12 @@ messages = self.mstore.get_pending_messages() self.assertEqual(messages[0]["type"], "wubble") self.assertEqual(messages[0]["wubblestuff"], 1) - self.assertIn("Queueing a message with updated data watcher info for " - "landscape.client.monitor.tests.test_plugin." - "StubDataWatchingPlugin.", self.logfile.getvalue()) + self.assertIn( + "Queueing a message with updated data watcher info for " + "landscape.client.monitor.tests.test_plugin." + "StubDataWatchingPlugin.", + self.logfile.getvalue(), + ) def test_unchanging_value(self): # Is this really want we want to do? @@ -166,7 +180,10 @@ self.mstore.set_accepted_types(["wubble"]) self.plugin.exchange(True) self.remote.send_message.assert_called_once_with( - ANY, ANY, urgent=True) + ANY, + ANY, + urgent=True, + ) def test_no_message_if_not_accepted(self): """ diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_processorinfo.py landscape-client-23.08/landscape/client/monitor/tests/test_processorinfo.py --- landscape-client-23.02/landscape/client/monitor/tests/test_processorinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_processorinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,16 @@ -from landscape.lib.plugin import PluginConfigError +from unittest.mock import ANY +from unittest.mock import Mock + from landscape.client.monitor.processorinfo import ProcessorInfo -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper -from mock import ANY, Mock +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper +from landscape.lib.plugin import PluginConfigError # The extra blank line at the bottom of some sample data definitions # is intentional. 
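# A minimal, self-contained sketch (illustrative only, not part of this
# patch) of the two mechanical changes applied across these test modules:
# the third-party ``mock`` backport is replaced by the standard library's
# ``unittest.mock`` (available since Python 3.3), and ``super(Class, self)``
# becomes the zero-argument ``super()``. ``BaseTest`` and ``target`` are
# placeholder names, not identifiers from this codebase:
#
#     from unittest import mock
#
#     class ChildTest(BaseTest):
#         def setUp(self):
#             super().setUp()  # was: super(ChildTest, self).setUp()
#             patcher = mock.patch.object(target, "method")
#             self.mocked = patcher.start()
#             self.addCleanup(patcher.stop)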
+ class ProcessorInfoTest(LandscapeTest): """Tests for CPU info plugin.""" @@ -14,8 +18,10 @@ def test_unknown_machine_name(self): """Ensure a PluginConfigError is raised for unknown machines.""" - self.assertRaises(PluginConfigError, - lambda: ProcessorInfo(machine_name="wubble")) + self.assertRaises( + PluginConfigError, + lambda: ProcessorInfo(machine_name="wubble"), + ) def test_read_proc_cpuinfo(self): """Ensure the plugin can parse /proc/cpuinfo.""" @@ -37,7 +43,8 @@ self.remote.send_message = Mock() self.reactor.fire( ("message-type-acceptance-changed", "processor-info"), - True) + True, + ) self.remote.send_message.assert_called_once_with(ANY, ANY, urgent=True) @@ -107,8 +114,7 @@ def test_read_sample_ppc_g5_data(self): """Ensure the plugin can parse /proc/cpuinfo from a dual PowerPC G5.""" filename = self.makeFile(self.SMP_PPC_G5) - plugin = ProcessorInfo(machine_name="ppc64", - source_filename=filename) + plugin = ProcessorInfo(machine_name="ppc64", source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 2) @@ -116,20 +122,21 @@ processor_0 = message["processors"][0] self.assertEqual(len(processor_0), 2) self.assertEqual(processor_0["processor-id"], 0) - self.assertEqual(processor_0["model"], - "PPC970FX, altivec supported") + self.assertEqual(processor_0["model"], "PPC970FX, altivec supported") processor_1 = message["processors"][1] self.assertEqual(len(processor_1), 2) self.assertEqual(processor_1["processor-id"], 1) - self.assertEqual(processor_1["model"], - "PPC970FX, altivec supported") + self.assertEqual(processor_1["model"], "PPC970FX, altivec supported") def test_ppc_g5_cpu_info_same_as_last_known_cpu_info(self): """Test that one message is queued for duplicate G5 CPU info.""" filename = self.makeFile(self.SMP_PPC_G5) - plugin = ProcessorInfo(delay=0.1, machine_name="ppc64", - source_filename=filename) + plugin = ProcessorInfo( + delay=0.1, + machine_name="ppc64", + source_filename=filename, + ) self.monitor.add(plugin) plugin.run() plugin.run() @@ -143,21 +150,18 @@ processor_0 = message["processors"][0] self.assertEqual(len(processor_0), 2) - self.assertEqual(processor_0["model"], - "PPC970FX, altivec supported") + self.assertEqual(processor_0["model"], "PPC970FX, altivec supported") self.assertEqual(processor_0["processor-id"], 0) processor_1 = message["processors"][1] self.assertEqual(len(processor_1), 2) - self.assertEqual(processor_1["model"], - "PPC970FX, altivec supported") + self.assertEqual(processor_1["model"], "PPC970FX, altivec supported") self.assertEqual(processor_1["processor-id"], 1) def test_read_sample_ppc_g4_data(self): """Ensure the plugin can parse /proc/cpuinfo from a G4 PowerBook.""" filename = self.makeFile(self.UP_PPC_G4) - plugin = ProcessorInfo(machine_name="ppc", - source_filename=filename) + plugin = ProcessorInfo(machine_name="ppc", source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 1) @@ -262,55 +266,54 @@ def test_read_sample_nokia_data(self): """Ensure the plugin can parse /proc/cpuinfo from a Nokia N810.""" filename = self.makeFile(self.ARM_NOKIA) - plugin = ProcessorInfo(machine_name="armv6l", - source_filename=filename) + plugin = ProcessorInfo(machine_name="armv6l", source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 1) 
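        # The N810 sample describes a single-core ARM SoC (TI OMAP2420),
        # hence exactly one entry before indexing processor 0 below.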
processor_0 = message["processors"][0] self.assertEqual(len(processor_0), 2) - self.assertEqual(processor_0["model"], - "ARMv6-compatible processor rev 2 (v6l)") + self.assertEqual( + processor_0["model"], + "ARMv6-compatible processor rev 2 (v6l)", + ) self.assertEqual(processor_0["processor-id"], 0) def test_read_sample_armv7_data(self): """Ensure the plugin can parse /proc/cpuinfo from a sample ARMv7.""" filename = self.makeFile(self.ARMv7) - plugin = ProcessorInfo(machine_name="armv7l", - source_filename=filename) + plugin = ProcessorInfo(machine_name="armv7l", source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 1) processor_0 = message["processors"][0] self.assertEqual(len(processor_0), 3) - self.assertEqual(processor_0["model"], - "ARMv7 Processor rev 1 (v7l)") + self.assertEqual(processor_0["model"], "ARMv7 Processor rev 1 (v7l)") self.assertEqual(processor_0["processor-id"], 0) self.assertEqual(processor_0["cache-size"], 768) def test_read_sample_armv7_reverse_data(self): """Ensure the plugin can parse a reversed sample ARMv7 /proc/cpuinfo""" filename = self.makeFile(self.ARMv7_reverse) - plugin = ProcessorInfo(machine_name="armv7l", - source_filename=filename) + plugin = ProcessorInfo(machine_name="armv7l", source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 1) processor_0 = message["processors"][0] self.assertEqual(len(processor_0), 3) - self.assertEqual(processor_0["model"], - "ARMv7 Processor rev 1 (v7l)") + self.assertEqual(processor_0["model"], "ARMv7 Processor rev 1 (v7l)") self.assertEqual(processor_0["processor-id"], 0) self.assertEqual(processor_0["cache-size"], 768) def test_read_sample_armv8_data(self): """Ensure the plugin can parse /proc/cpuinfo from a sample ARMv8.""" filename = self.makeFile(self.ARMv8_64) - plugin = ProcessorInfo(machine_name="aarch64", - source_filename=filename) + plugin = ProcessorInfo( + machine_name="aarch64", + source_filename=filename, + ) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 1) @@ -319,7 +322,8 @@ self.assertEqual(len(processor_0), 2) self.assertEqual( processor_0["model"], - "AArch64 Processor rev 0 (aarch64)") + "AArch64 Processor rev 0 (aarch64)", + ) self.assertEqual(processor_0["processor-id"], 0) @@ -350,22 +354,22 @@ def test_read_sample_sparc_data(self): """Ensure the plugin can parse /proc/cpuinfo from a dual UltraSparc.""" filename = self.makeFile(self.SMP_SPARC) - plugin = ProcessorInfo(machine_name="sparc64", - source_filename=filename) + plugin = ProcessorInfo( + machine_name="sparc64", + source_filename=filename, + ) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 2) processor_0 = message["processors"][0] self.assertEqual(len(processor_0), 2) - self.assertEqual(processor_0["model"], - "TI UltraSparc IIIi (Jalapeno)") + self.assertEqual(processor_0["model"], "TI UltraSparc IIIi (Jalapeno)") self.assertEqual(processor_0["processor-id"], 0) processor_1 = message["processors"][1] self.assertEqual(len(processor_1), 2) - self.assertEqual(processor_1["model"], - "TI UltraSparc IIIi (Jalapeno)") + self.assertEqual(processor_1["model"], "TI UltraSparc IIIi (Jalapeno)") self.assertEqual(processor_1["processor-id"], 1) @@ -378,7 +382,7 @@ vendor_id : 
IBM/S390 # processors : 4 bogomips per cpu: 3033.00 -features : esan3 zarch stfle msa ldisp eimm dfp etf3eh highgprs +features : esan3 zarch stfle msa ldisp eimm dfp etf3eh highgprs cache0 : level=1 type=Data scope=Private size=128K line_size=256 associativity=8 cache1 : level=1 type=Instruction scope=Private size=96K line_size=256 associativity=6 cache2 : level=2 type=Data scope=Private size=2048K line_size=256 associativity=8 @@ -394,19 +398,21 @@ def test_read_sample_s390x_data(self): """Ensure the plugin can parse /proc/cpuinfo for IBM zSeries.""" filename = self.makeFile(self.S390X) - plugin = ProcessorInfo(machine_name="s390x", - source_filename=filename) + plugin = ProcessorInfo(machine_name="s390x", source_filename=filename) message = plugin.create_message() self.assertEqual("processor-info", message["type"]) self.assertEqual(4, len(message["processors"])) for id, processor in enumerate(message["processors"]): self.assertEqual( - {"vendor": "IBM/S390", - "model": "2964", - "processor-id": id, - "cache-size": 491520, - }, processor) + { + "vendor": "IBM/S390", + "model": "2964", + "processor-id": id, + "cache-size": 491520, + }, + processor, + ) class X86MessageTest(LandscapeTest): @@ -486,8 +492,7 @@ def test_read_sample_opteron_data(self): """Ensure the plugin can parse /proc/cpuinfo from a dual Opteron.""" filename = self.makeFile(self.SMP_OPTERON) - plugin = ProcessorInfo(machine_name="x86_64", - source_filename=filename) + plugin = ProcessorInfo(machine_name="x86_64", source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 2) @@ -495,43 +500,50 @@ processor_0 = message["processors"][0] self.assertEqual(len(processor_0), 4) self.assertEqual(processor_0["vendor"], "AuthenticAMD") - self.assertEqual(processor_0["model"], - "AMD Opteron(tm) Processor 250") + self.assertEqual(processor_0["model"], "AMD Opteron(tm) Processor 250") self.assertEqual(processor_0["cache-size"], 1024) self.assertEqual(processor_0["processor-id"], 0) processor_1 = message["processors"][1] self.assertEqual(len(processor_1), 4) self.assertEqual(processor_1["vendor"], "AuthenticAMD") - self.assertEqual(processor_1["model"], - "AMD Opteron(tm) Processor 250") + self.assertEqual(processor_1["model"], "AMD Opteron(tm) Processor 250") self.assertEqual(processor_1["cache-size"], 1024) self.assertEqual(processor_1["processor-id"], 1) def test_plugin_manager(self): """Test plugin manager integration.""" filename = self.makeFile(self.UP_PENTIUM_M) - plugin = ProcessorInfo(delay=0.1, machine_name="i686", - source_filename=filename) + plugin = ProcessorInfo( + delay=0.1, + machine_name="i686", + source_filename=filename, + ) self.monitor.add(plugin) self.reactor.advance(0.5) self.monitor.exchange() self.assertMessages( self.mstore.get_pending_messages(), - [{"type": "processor-info", - "processors": [ - {"vendor": "GenuineIntel", - "model": "Intel(R) Pentium(R) M processor 1.50GHz", - "cache-size": 2048, - "processor-id": 0}], - }]) + [ + { + "type": "processor-info", + "processors": [ + { + "vendor": "GenuineIntel", + "model": "Intel(R) Pentium(R) M processor 1.50GHz", + "cache-size": 2048, + "processor-id": 0, + }, + ], + }, + ], + ) def test_read_sample_pentium_m_data(self): """Ensure the plugin can parse /proc/cpuinfo from a Pentium-M.""" filename = self.makeFile(self.UP_PENTIUM_M) - plugin = ProcessorInfo(machine_name="i686", - source_filename=filename) + plugin = ProcessorInfo(machine_name="i686", 
source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 1) @@ -539,8 +551,10 @@ processor = message["processors"][0] self.assertEqual(len(processor), 4) self.assertEqual(processor["vendor"], "GenuineIntel") - self.assertEqual(processor["model"], - "Intel(R) Pentium(R) M processor 1.50GHz") + self.assertEqual( + processor["model"], + "Intel(R) Pentium(R) M processor 1.50GHz", + ) self.assertEqual(processor["cache-size"], 2048) self.assertEqual(processor["processor-id"], 0) @@ -548,8 +562,11 @@ """Test that one message is queued for duplicate Pentium-M CPU info.""" filename = self.makeFile(self.UP_PENTIUM_M) - plugin = ProcessorInfo(delay=0.1, machine_name="i686", - source_filename=filename) + plugin = ProcessorInfo( + delay=0.1, + machine_name="i686", + source_filename=filename, + ) self.monitor.add(plugin) self.monitor.add(plugin) self.reactor.call_later(0.5, self.reactor.stop) @@ -565,15 +582,20 @@ processor = message["processors"][0] self.assertEqual(len(processor), 4) self.assertEqual(processor["vendor"], "GenuineIntel") - self.assertEqual(processor["model"], - "Intel(R) Pentium(R) M processor 1.50GHz") + self.assertEqual( + processor["model"], + "Intel(R) Pentium(R) M processor 1.50GHz", + ) self.assertEqual(processor["cache-size"], 2048) self.assertEqual(processor["processor-id"], 0) def test_unchanging_data(self): filename = self.makeFile(self.UP_PENTIUM_M) - plugin = ProcessorInfo(delay=0.1, machine_name="i686", - source_filename=filename) + plugin = ProcessorInfo( + delay=0.1, + machine_name="i686", + source_filename=filename, + ) self.monitor.add(plugin) plugin.run() plugin.run() @@ -581,8 +603,11 @@ def test_changing_data(self): filename = self.makeFile(self.UP_PENTIUM_M) - plugin = ProcessorInfo(delay=0.1, machine_name="i686", - source_filename=filename) + plugin = ProcessorInfo( + delay=0.1, + machine_name="i686", + source_filename=filename, + ) self.monitor.add(plugin) plugin.run() self.makeFile(self.SMP_OPTERON, path=filename) @@ -597,8 +622,11 @@ """ self.mstore.set_accepted_types([]) filename = self.makeFile(self.UP_PENTIUM_M) - plugin = ProcessorInfo(delay=0.1, machine_name="i686", - source_filename=filename) + plugin = ProcessorInfo( + delay=0.1, + machine_name="i686", + source_filename=filename, + ) self.monitor.add(plugin) self.mstore.set_accepted_types(["processor-info"]) diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_rebootrequired.py landscape-client-23.08/landscape/client/monitor/tests/test_rebootrequired.py --- landscape-client-23.02/landscape/client/monitor/tests/test_rebootrequired.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_rebootrequired.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,9 @@ -import mock +from unittest import mock -from landscape.lib.testing import LogKeeperHelper from landscape.client.monitor.rebootrequired import RebootRequired -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper +from landscape.lib.testing import LogKeeperHelper class RebootRequiredTest(LandscapeTest): @@ -10,7 +11,7 @@ helpers = [MonitorHelper, LogKeeperHelper] def setUp(self): - super(RebootRequiredTest, self).setUp() + super().setUp() self.reboot_required_filename = self.makeFile() self.plugin = RebootRequired(self.reboot_required_filename) 
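        # RebootRequired follows Ubuntu's update-notifier convention: a flag
        # file (/var/run/reboot-required on a stock system) signals that a
        # reboot is pending, and a companion "<flag>.pkgs" file lists the
        # responsible packages one per line; the tests below fabricate both
        # with makeFile().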
self.monitor.add(self.plugin) @@ -31,8 +32,10 @@ reboot-required packages file if present, or an empty list otherwise. """ self.assertEqual([], self.plugin._get_packages()) - self.makeFile(path=self.reboot_required_filename + ".pkgs", - content="foo\nbar\n") + self.makeFile( + path=self.reboot_required_filename + ".pkgs", + content="foo\nbar\n", + ) self.assertEqual(["bar", "foo"], self.plugin._get_packages()) def test_wb_get_packages_with_duplicates(self): @@ -41,8 +44,10 @@ not contain duplicate values. """ self.assertEqual([], self.plugin._get_packages()) - self.makeFile(path=self.reboot_required_filename + ".pkgs", - content="foo\nfoo\n") + self.makeFile( + path=self.reboot_required_filename + ".pkgs", + content="foo\nfoo\n", + ) self.assertEqual(["foo"], self.plugin._get_packages()) def test_wb_get_packages_with_blank_lines(self): @@ -50,8 +55,10 @@ Blank lines are ignored by L{RebootRequired._get_packages}. """ self.assertEqual([], self.plugin._get_packages()) - self.makeFile(path=self.reboot_required_filename + ".pkgs", - content="bar\n\nfoo\n") + self.makeFile( + path=self.reboot_required_filename + ".pkgs", + content="bar\n\nfoo\n", + ) self.assertEqual(["bar", "foo"], self.plugin._get_packages()) def test_wb_create_message(self): @@ -59,31 +66,43 @@ A message should be created if and only if the reboot-required status of the system has changed. """ - self.assertEqual({"flag": False, "packages": []}, - self.plugin._create_message()) + self.assertEqual( + {"flag": False, "packages": []}, + self.plugin._create_message(), + ) self.makeFile(path=self.reboot_required_filename, content="") - self.assertEqual({"flag": True}, - self.plugin._create_message()) - self.makeFile(path=self.reboot_required_filename + ".pkgs", - content="foo\n") - self.assertEqual({"packages": [u"foo"]}, - self.plugin._create_message()) + self.assertEqual({"flag": True}, self.plugin._create_message()) + self.makeFile( + path=self.reboot_required_filename + ".pkgs", + content="foo\n", + ) + self.assertEqual({"packages": ["foo"]}, self.plugin._create_message()) def test_send_message(self): """ A new C{"reboot-required-info"} message should be enqueued if and only if the reboot-required status of the system has changed. 
""" - self.makeFile(path=self.reboot_required_filename + ".pkgs", - content="foo\n") + self.makeFile( + path=self.reboot_required_filename + ".pkgs", + content="foo\n", + ) self.makeFile(path=self.reboot_required_filename, content="") self.plugin.send_message() - self.assertIn("Queueing message with updated reboot-required status.", - self.logfile.getvalue()) - self.assertMessages(self.mstore.get_pending_messages(), - [{"type": "reboot-required-info", - "flag": True, - "packages": [u"foo"]}]) + self.assertIn( + "Queueing message with updated reboot-required status.", + self.logfile.getvalue(), + ) + self.assertMessages( + self.mstore.get_pending_messages(), + [ + { + "type": "reboot-required-info", + "flag": True, + "packages": ["foo"], + }, + ], + ) self.mstore.delete_all_messages() self.plugin.send_message() self.assertMessages(self.mstore.get_pending_messages(), []) @@ -108,7 +127,10 @@ with mock.patch.object(self.remote, "send_message"): self.plugin.run() self.remote.send_message.assert_called_once_with( - mock.ANY, mock.ANY, urgent=True) + mock.ANY, + mock.ANY, + urgent=True, + ) self.mstore.set_accepted_types([]) self.plugin.run() diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_service.py landscape-client-23.08/landscape/client/monitor/tests/test_service.py --- landscape-client-23.02/landscape/client/monitor/tests/test_service.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_service.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,13 @@ -from mock import Mock +from unittest.mock import Mock -from landscape.lib.testing import FakeReactor -from landscape.client.tests.helpers import ( - LandscapeTest, FakeBrokerServiceHelper) -from landscape.client.monitor.config import MonitorConfiguration, ALL_PLUGINS -from landscape.client.monitor.service import MonitorService from landscape.client.monitor.computerinfo import ComputerInfo +from landscape.client.monitor.config import ALL_PLUGINS +from landscape.client.monitor.config import MonitorConfiguration from landscape.client.monitor.loadaverage import LoadAverage +from landscape.client.monitor.service import MonitorService +from landscape.client.tests.helpers import FakeBrokerServiceHelper +from landscape.client.tests.helpers import LandscapeTest +from landscape.lib.testing import FakeReactor class MonitorServiceTest(LandscapeTest): @@ -14,7 +15,7 @@ helpers = [FakeBrokerServiceHelper] def setUp(self): - super(MonitorServiceTest, self).setUp() + super().setUp() config = MonitorConfiguration() config.load(["-c", self.config_filename]) @@ -36,8 +37,9 @@ If the C{--monitor-plugins} command line option is specified, only the given plugins will be enabled. 
""" - self.service.config.load(["--monitor-plugins", - "ComputerInfo, LoadAverage"]) + self.service.config.load( + ["--monitor-plugins", "ComputerInfo, LoadAverage"], + ) plugins = self.service.get_plugins() self.assertTrue(isinstance(plugins[0], ComputerInfo)) self.assertTrue(isinstance(plugins[1], LoadAverage)) diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_snapmonitor.py landscape-client-23.08/landscape/client/monitor/tests/test_snapmonitor.py --- landscape-client-23.02/landscape/client/monitor/tests/test_snapmonitor.py 1970-01-01 00:00:00.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_snapmonitor.py 2023-08-17 21:19:32.000000000 +0000 @@ -0,0 +1,50 @@ +from unittest.mock import Mock + +from landscape.client.monitor.snapmonitor import SnapMonitor +from landscape.client.snap.http import SnapdHttpException, SnapHttp +from landscape.client.tests.helpers import LandscapeTest, MonitorHelper + + +class SnapMonitorTest(LandscapeTest): + """Snap plugin tests.""" + + helpers = [MonitorHelper] + + def setUp(self): + super(SnapMonitorTest, self).setUp() + self.mstore.set_accepted_types(["snaps"]) + + def test_get_data(self): + """Tests getting installed snap data.""" + plugin = SnapMonitor() + self.monitor.add(plugin) + + plugin.exchange() + + messages = self.mstore.get_pending_messages() + + self.assertTrue(len(messages) > 0) + self.assertIn("installed", messages[0]["snaps"]) + + def test_get_data_snapd_http_exception(self): + """ + Tests that we return no data if there is an error getting it. + """ + snap_http_mock = Mock( + spec=SnapHttp, + get_snaps=Mock(side_effect=SnapdHttpException) + ) + plugin = SnapMonitor() + plugin._snap_http = snap_http_mock + self.monitor.add(plugin) + + with self.assertLogs(level="ERROR") as cm: + plugin.exchange() + + messages = self.mstore.get_pending_messages() + + self.assertEqual(len(messages), 0) + self.assertEqual( + cm.output, + ["ERROR:root:Unable to list installed snaps: "] + ) diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_swiftusage.py landscape-client-23.08/landscape/client/monitor/tests/test_swiftusage.py --- landscape-client-23.02/landscape/client/monitor/tests/test_swiftusage.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_swiftusage.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,22 +1,24 @@ -from twisted.internet.defer import succeed from unittest import skipUnless +from unittest.mock import ANY +from unittest.mock import Mock + +from twisted.internet.defer import succeed from landscape.client.monitor.swiftusage import SwiftUsage -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper -from mock import ANY, Mock +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper try: from swift.cli.recon import Scout + has_swift = True except ImportError: has_swift = False -class FakeRing(object): +class FakeRing: def __init__(self, ip_port_tuples=[]): - self.devs = [ - {"ip": ip, "port": port} - for ip, port in ip_port_tuples] + self.devs = [{"ip": ip, "port": port} for ip, port in ip_port_tuples] class SwiftUsageTest(LandscapeTest): @@ -28,7 +30,9 @@ LandscapeTest.setUp(self) self.mstore.set_accepted_types(["swift-usage"]) self.plugin = SwiftUsage( - create_time=self.reactor.time, swift_ring=self.makeFile("ring")) + create_time=self.reactor.time, + swift_ring=self.makeFile("ring"), + ) self.plugin._has_swift = True def test_wb_should_run_not_active(self): @@ 
-55,7 +59,9 @@ file is not found. """ plugin = SwiftUsage( - create_time=self.reactor.time, swift_ring=self.makeFile()) + create_time=self.reactor.time, + swift_ring=self.makeFile(), + ) plugin._has_swift = True self.assertFalse(plugin._should_run()) @@ -65,7 +71,9 @@ ring is properly configured. """ plugin = SwiftUsage( - create_time=self.reactor.time, swift_ring=self.makeFile("ring")) + create_time=self.reactor.time, + swift_ring=self.makeFile("ring"), + ) plugin._has_swift = True self.assertTrue(plugin._should_run()) @@ -76,8 +84,10 @@ with the latest swift device information will be delivered in a single message. """ - points = [(1234, "sdb", 100000, 80000, 20000), - (1234, "sdc", 200000, 120000, 800000)] + points = [ + (1234, "sdb", 100000, 80000, 20000), + (1234, "sdc", 200000, 120000, 800000), + ] self.plugin._swift_usage_points = points self.monitor.add(self.plugin) @@ -99,12 +109,16 @@ def test_create_message(self): """L{SwiftUsage.create_message} returns a 'swift-usage' message.""" - points = [(1234, "sdb", 100000, 80000, 20000), - (1234, "sdc", 200000, 120000, 80000)] + points = [ + (1234, "sdb", 100000, 80000, 20000), + (1234, "sdc", 200000, 120000, 80000), + ] self.plugin._swift_usage_points = points message = self.plugin.create_message() self.assertEqual( - {"type": "swift-usage", "data-points": points}, message) + {"type": "swift-usage", "data-points": points}, + message, + ) def test_create_message_empty(self): """ @@ -152,7 +166,9 @@ self.remote.send_message = Mock(return_value=succeed(None)) self.reactor.fire( - ("message-type-acceptance-changed", "swift-usage"), True) + ("message-type-acceptance-changed", "swift-usage"), + True, + ) self.remote.send_message.assert_called_once_with(ANY, ANY, urgent=True) def test_message_only_mounted_devices(self): @@ -160,21 +176,28 @@ The plugin only collects usage for mounted devices. """ recon_response = [ - {"device": "vdb", - "mounted": True, - "size": 100000, - "avail": 80000, - "used": 20000}, - {"device": "vdc", - "mounted": False, - "size": "", - "avail": "", - "used": ""}, - {"device": "vdd", - "mounted": True, - "size": 200000, - "avail": 10000, - "used": 190000}] + { + "device": "vdb", + "mounted": True, + "size": 100000, + "avail": 80000, + "used": 20000, + }, + { + "device": "vdc", + "mounted": False, + "size": "", + "avail": "", + "used": "", + }, + { + "device": "vdd", + "mounted": True, + "size": 200000, + "avail": 10000, + "used": 190000, + }, + ] self.plugin._perform_recon_call = lambda host: succeed(recon_response) self.plugin._get_recon_host = lambda: ("192.168.1.10", 6000) @@ -183,11 +206,16 @@ self.plugin._handle_usage(recon_response) self.assertEqual( - [(30, "vdb", 100000, 80000, 20000), - (30, "vdd", 200000, 10000, 190000)], - self.plugin._swift_usage_points) + [ + (30, "vdb", 100000, 80000, 20000), + (30, "vdd", 200000, 10000, 190000), + ], + self.plugin._swift_usage_points, + ) self.assertEqual( - ["vdb", "vdd"], sorted(self.plugin._persist.get("devices"))) + ["vdb", "vdd"], + sorted(self.plugin._persist.get("devices")), + ) self.assertNotIn("vdc", self.plugin._persist.get("usage")) def test_message_remove_disappeared_devices(self): @@ -195,16 +223,21 @@ Usage for devices that have disappeared are removed from the persist. 
""" recon_response = [ - {"device": "vdb", - "mounted": True, - "size": 100000, - "avail": 80000, - "used": 20000}, - {"device": "vdc", - "mounted": True, - "size": 200000, - "avail": 10000, - "used": 190000}] + { + "device": "vdb", + "mounted": True, + "size": 100000, + "avail": 80000, + "used": 20000, + }, + { + "device": "vdc", + "mounted": True, + "size": 200000, + "avail": 10000, + "used": 190000, + }, + ] self.plugin._perform_recon_call = lambda host: succeed(recon_response) self.plugin._get_recon_host = lambda: ("192.168.1.10", 6000) @@ -212,14 +245,19 @@ self.reactor.advance(self.monitor.step_size) self.plugin._handle_usage(recon_response) self.assertEqual( - ["vdb", "vdc"], sorted(self.plugin._persist.get("devices"))) + ["vdb", "vdc"], + sorted(self.plugin._persist.get("devices")), + ) recon_response = [ - {"device": "vdb", - "mounted": True, - "size": 100000, - "avail": 70000, - "used": 30000}] + { + "device": "vdb", + "mounted": True, + "size": 100000, + "avail": 70000, + "used": 30000, + }, + ] self.reactor.advance(self.monitor.step_size) self.plugin._handle_usage(recon_response) self.assertNotIn("vdc", self.plugin._persist.get("usage")) @@ -231,16 +269,21 @@ persist. """ recon_response = [ - {"device": "vdb", - "mounted": True, - "size": 100000, - "avail": 80000, - "used": 20000}, - {"device": "vdc", - "mounted": True, - "size": 200000, - "avail": 10000, - "used": 190000}] + { + "device": "vdb", + "mounted": True, + "size": 100000, + "avail": 80000, + "used": 20000, + }, + { + "device": "vdc", + "mounted": True, + "size": 200000, + "avail": 10000, + "used": 190000, + }, + ] self.plugin._perform_recon_call = lambda host: succeed(recon_response) self.plugin._get_recon_host = lambda: ("192.168.1.10", 6000) @@ -248,19 +291,26 @@ self.reactor.advance(self.monitor.step_size) self.plugin._handle_usage(recon_response) self.assertEqual( - ["vdb", "vdc"], sorted(self.plugin._persist.get("devices"))) + ["vdb", "vdc"], + sorted(self.plugin._persist.get("devices")), + ) recon_response = [ - {"device": "vdb", - "mounted": True, - "size": 100000, - "avail": 70000, - "used": 30000}, - {"device": "vdc", - "mounted": False, - "size": "", - "avail": "", - "used": ""}] + { + "device": "vdb", + "mounted": True, + "size": 100000, + "avail": 70000, + "used": 30000, + }, + { + "device": "vdc", + "mounted": False, + "size": "", + "avail": "", + "used": "", + }, + ] self.reactor.advance(self.monitor.step_size) self.plugin._handle_usage(recon_response) self.assertNotIn("vdc", self.plugin._persist.get("usage")) @@ -274,13 +324,21 @@ """ plugin = SwiftUsage(create_time=self.reactor.time) expected_disk_usage = [ - {u"device": u"vdb", - u"mounted": True, - u"size": 100000, - u"avail": 70000, - u"used": 30000}] - Scout.scout = lambda _, host: ("recon_url", expected_disk_usage, 200, - 1459286522.711885, 1459286522.716989) + { + "device": "vdb", + "mounted": True, + "size": 100000, + "avail": 70000, + "used": 30000, + }, + ] + Scout.scout = lambda _, host: ( + "recon_url", + expected_disk_usage, + 200, + 1459286522.711885, + 1459286522.716989, + ) host = ("192.168.1.10", 6000) response = plugin._perform_recon_call(host) self.assertEqual(response, expected_disk_usage) @@ -293,11 +351,14 @@ """ plugin = SwiftUsage(create_time=self.reactor.time) expected_disk_usage = [ - {u"device": u"vdb", - u"mounted": True, - u"size": 100000, - u"avail": 70000, - u"used": 30000}] + { + "device": "vdb", + "mounted": True, + "size": 100000, + "avail": 70000, + "used": 30000, + }, + ] Scout.scout = lambda _, host: ("recon_url", 
expected_disk_usage, 200) host = ("192.168.1.10", 6000) response = plugin._perform_recon_call(host) @@ -308,19 +369,21 @@ Checks that unicode responses can be processed without errors """ recon_response = [ - {u"device": u"vdb", - u"mounted": True, - u"size": 100000, - u"avail": 70000, - u"used": 30000}] + { + "device": "vdb", + "mounted": True, + "size": 100000, + "avail": 70000, + "used": 30000, + }, + ] self.plugin._perform_recon_call = lambda host: succeed(recon_response) self.plugin._get_recon_host = lambda: ("192.168.1.10", 6000) self.monitor.add(self.plugin) self.reactor.advance(self.monitor.step_size) self.plugin._handle_usage(recon_response) - self.assertEqual( - [u"vdb"], sorted(self.plugin._persist.get("devices"))) + self.assertEqual(["vdb"], sorted(self.plugin._persist.get("devices"))) def test_wb_handle_usage_with_invalid_data(self): """Checks that _handle_usage does not raise with invalid recon data. diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_temperature.py landscape-client-23.08/landscape/client/monitor/tests/test_temperature.py --- landscape-client-23.02/landscape/client/monitor/tests/test_temperature.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_temperature.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,10 +1,11 @@ -import mock import os import tempfile +from unittest import mock from landscape.client.monitor.temperature import Temperature +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper from landscape.lib.tests.test_sysstats import ThermalZoneTest -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper class TemperatureTestWithSampleData(ThermalZoneTest, LandscapeTest): @@ -14,7 +15,7 @@ def setUp(self): """Initialize test helpers and create a sample thermal zone.""" - super(TemperatureTestWithSampleData, self).setUp() + super().setUp() self.mstore.set_accepted_types(["temperature"]) self.write_thermal_zone("ZONE1", "50000") @@ -47,8 +48,10 @@ creates messages with changes reported correctly. """ self.write_thermal_zone("ZONE2", "50000") - plugin = Temperature(thermal_zone_path=self.thermal_zone_path, - create_time=self.reactor.time) + plugin = Temperature( + thermal_zone_path=self.thermal_zone_path, + create_time=self.reactor.time, + ) step_size = self.monitor.step_size self.monitor.add(plugin) @@ -62,17 +65,13 @@ self.assertEqual(messages[0]["thermal-zone"], "ZONE1") self.assertEqual(len(messages[0]["temperatures"]), 2) - self.assertEqual(messages[0]["temperatures"][0], - (step_size, 50.0)) - self.assertEqual(messages[0]["temperatures"][1], - (step_size * 2, 50.0)) + self.assertEqual(messages[0]["temperatures"][0], (step_size, 50.0)) + self.assertEqual(messages[0]["temperatures"][1], (step_size * 2, 50.0)) self.assertEqual(messages[1]["thermal-zone"], "ZONE2") self.assertEqual(len(messages[1]["temperatures"]), 2) - self.assertEqual(messages[1]["temperatures"][0], - (step_size, 50.0)) - self.assertEqual(messages[1]["temperatures"][1], - (step_size * 2, 56.0)) + self.assertEqual(messages[1]["temperatures"][0], (step_size, 50.0)) + self.assertEqual(messages[1]["temperatures"][1], (step_size * 2, 56.0)) def test_messaging_flushes(self): """ @@ -80,8 +79,10 @@ available, a message with an empty C{temperatures} list is expected. 
""" - plugin = Temperature(thermal_zone_path=self.thermal_zone_path, - create_time=self.reactor.time) + plugin = Temperature( + thermal_zone_path=self.thermal_zone_path, + create_time=self.reactor.time, + ) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) @@ -99,8 +100,10 @@ should not be queued. """ self.write_thermal_zone("ZONE2", "50000") - plugin = Temperature(thermal_zone_path=self.thermal_zone_path, - create_time=self.reactor.time) + plugin = Temperature( + thermal_zone_path=self.thermal_zone_path, + create_time=self.reactor.time, + ) self.monitor.add(plugin) self.assertEqual(len(self.mstore.get_pending_messages()), 0) @@ -112,20 +115,30 @@ delivered in a single message. """ self.write_thermal_zone("ZONE2", "50000") - plugin = Temperature(thermal_zone_path=self.thermal_zone_path, - create_time=self.reactor.time) + plugin = Temperature( + thermal_zone_path=self.thermal_zone_path, + create_time=self.reactor.time, + ) step_size = self.monitor.step_size self.monitor.add(plugin) self.reactor.advance(step_size) self.monitor.exchange() - self.assertMessages(self.mstore.get_pending_messages(), - [{"type": "temperature", - "thermal-zone": "ZONE1", - "temperatures": [(step_size, 50.0)]}, - {"type": "temperature", - "thermal-zone": "ZONE2", - "temperatures": [(step_size, 50.0)]}]) + self.assertMessages( + self.mstore.get_pending_messages(), + [ + { + "type": "temperature", + "thermal-zone": "ZONE1", + "temperatures": [(step_size, 50.0)], + }, + { + "type": "temperature", + "thermal-zone": "ZONE2", + "temperatures": [(step_size, 50.0)], + }, + ], + ) def test_no_messages_on_bad_values(self): """ @@ -133,8 +146,10 @@ break and no messages are sent. """ self.write_thermal_zone("ZONE1", "UNKNOWN C") - plugin = Temperature(thermal_zone_path=self.thermal_zone_path, - create_time=self.reactor.time) + plugin = Temperature( + thermal_zone_path=self.thermal_zone_path, + create_time=self.reactor.time, + ) step_size = self.monitor.step_size self.monitor.add(plugin) self.reactor.advance(step_size) @@ -143,17 +158,24 @@ self.assertMessages(self.mstore.get_pending_messages(), []) def test_call_on_accepted(self): - plugin = Temperature(thermal_zone_path=self.thermal_zone_path, - create_time=self.reactor.time) + plugin = Temperature( + thermal_zone_path=self.thermal_zone_path, + create_time=self.reactor.time, + ) self.monitor.add(plugin) self.reactor.advance(plugin.registry.step_size) with mock.patch.object(self.remote, "send_message"): - self.reactor.fire(("message-type-acceptance-changed", - "temperature"), True) + self.reactor.fire( + ("message-type-acceptance-changed", "temperature"), + True, + ) self.remote.send_message.assert_called_once_with( - mock.ANY, mock.ANY, urgent=True) + mock.ANY, + mock.ANY, + urgent=True, + ) def test_no_message_if_not_accepted(self): """ @@ -161,8 +183,10 @@ accepting their type. 
""" self.mstore.set_accepted_types([]) - plugin = Temperature(thermal_zone_path=self.thermal_zone_path, - create_time=self.reactor.time) + plugin = Temperature( + thermal_zone_path=self.thermal_zone_path, + create_time=self.reactor.time, + ) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 2) self.monitor.exchange() diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_ubuntuproinfo.py landscape-client-23.08/landscape/client/monitor/tests/test_ubuntuproinfo.py --- landscape-client-23.02/landscape/client/monitor/tests/test_ubuntuproinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_ubuntuproinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,7 +1,8 @@ from unittest import mock from landscape.client.monitor.ubuntuproinfo import UbuntuProInfo -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper class UbuntuProInfoTest(LandscapeTest): @@ -10,7 +11,7 @@ helpers = [MonitorHelper] def setUp(self): - super(UbuntuProInfoTest, self).setUp() + super().setUp() self.mstore.set_accepted_types(["ubuntu-pro-info"]) def test_ubuntu_pro_info(self): @@ -20,7 +21,7 @@ with mock.patch("subprocess.run") as run_mock: run_mock.return_value = mock.Mock( - stdout="\"This is a test\"", + stdout='"This is a test"', ) plugin.exchange() @@ -28,8 +29,7 @@ run_mock.assert_called_once() self.assertTrue(len(messages) > 0) self.assertTrue("ubuntu-pro-info" in messages[0]) - self.assertEqual(messages[0]["ubuntu-pro-info"], - "\"This is a test\"") + self.assertEqual(messages[0]["ubuntu-pro-info"], '"This is a test"') def test_ubuntu_pro_info_no_ua(self): """Tests calling `ua status` when it is not installed.""" diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_ubuntuprorebootrequired.py landscape-client-23.08/landscape/client/monitor/tests/test_ubuntuprorebootrequired.py --- landscape-client-23.02/landscape/client/monitor/tests/test_ubuntuprorebootrequired.py 1970-01-01 00:00:00.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_ubuntuprorebootrequired.py 2023-08-17 21:19:32.000000000 +0000 @@ -0,0 +1,29 @@ +from landscape.client.monitor.ubuntuprorebootrequired import ( + UbuntuProRebootRequired, +) +from landscape.client.tests.helpers import LandscapeTest, MonitorHelper + + +class UbuntuProRebootRequiredTest(LandscapeTest): + """Ubuntu Pro required plugin tests.""" + + helpers = [MonitorHelper] + + def setUp(self): + super().setUp() + self.mstore.set_accepted_types(["ubuntu-pro-reboot-required"]) + + def test_ubuntu_pro_reboot_required(self): + """Tests calling reboot required.""" + + plugin = UbuntuProRebootRequired() + self.monitor.add(plugin) + plugin.exchange() + + messages = self.mstore.get_pending_messages() + + self.assertGreater(len(messages), 0) + self.assertIn("ubuntu-pro-reboot-required", messages[0]) + self.assertIn( + "reboot_required", messages[0]["ubuntu-pro-reboot-required"] + ) diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_updatemanager.py landscape-client-23.08/landscape/client/monitor/tests/test_updatemanager.py --- landscape-client-23.02/landscape/client/monitor/tests/test_updatemanager.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_updatemanager.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,9 @@ -import mock +from unittest import mock -from 
landscape.lib.testing import LogKeeperHelper from landscape.client.monitor.updatemanager import UpdateManager -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper +from landscape.lib.testing import LogKeeperHelper class UpdateManagerTest(LandscapeTest): @@ -15,7 +16,7 @@ helpers = [MonitorHelper, LogKeeperHelper] def setUp(self): - super(UpdateManagerTest, self).setUp() + super().setUp() self.update_manager_filename = self.makeFile() self.plugin = UpdateManager(self.update_manager_filename) self.monitor.add(self.plugin) @@ -65,11 +66,14 @@ """ self.makeFile(path=self.update_manager_filename, content=content) self.plugin.send_message() - self.assertIn("Queueing message with updated update-manager status.", - self.logfile.getvalue()) - self.assertMessages(self.mstore.get_pending_messages(), - [{"type": "update-manager-info", - "prompt": u"never"}]) + self.assertIn( + "Queueing message with updated update-manager status.", + self.logfile.getvalue(), + ) + self.assertMessages( + self.mstore.get_pending_messages(), + [{"type": "update-manager-info", "prompt": "never"}], + ) self.mstore.delete_all_messages() self.plugin.send_message() self.assertMessages(self.mstore.get_pending_messages(), []) @@ -94,7 +98,9 @@ with mock.patch.object(self.remote, "send_message"): self.plugin.run() self.remote.send_message.assert_called_once_with( - mock.ANY, mock.ANY) + mock.ANY, + mock.ANY, + ) self.mstore.set_accepted_types([]) self.plugin.run() diff -Nru landscape-client-23.02/landscape/client/monitor/tests/test_usermonitor.py landscape-client-23.08/landscape/client/monitor/tests/test_usermonitor.py --- landscape-client-23.02/landscape/client/monitor/tests/test_usermonitor.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/tests/test_usermonitor.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,15 +1,17 @@ import os +from unittest.mock import ANY +from unittest.mock import Mock -from mock import Mock, ANY from twisted.internet.defer import fail +import landscape.client.monitor.usermonitor from landscape.client.amp import ComponentPublisher -from landscape.client.monitor.usermonitor import ( - UserMonitor, RemoteUserMonitorConnector) from landscape.client.manager.usermanager import UserManager +from landscape.client.monitor.usermonitor import RemoteUserMonitorConnector +from landscape.client.monitor.usermonitor import UserMonitor +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper from landscape.client.user.tests.helpers import FakeUserProvider -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper -import landscape.client.monitor.usermonitor class UserMonitorNoManagerTest(LandscapeTest): @@ -26,13 +28,26 @@ def got_result(result): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"create-group-members": {u"webdev": [u"jdoe"]}, - "create-groups": [{"gid": 1000, "name": u"webdev"}], - "create-users": [{"enabled": True, "home-phone": None, - "location": None, "name": u"JD", - "primary-gid": 1000, "uid": 1000, - "username": u"jdoe", "work-phone": None}], - "type": "users"}]) + [ + { + "create-group-members": {"webdev": ["jdoe"]}, + "create-groups": [{"gid": 1000, "name": "webdev"}], + "create-users": [ + { + "enabled": True, + "home-phone": None, + "location": None, + "name": "JD", + "primary-gid": 1000, + "uid": 1000, + "username": 
"jdoe", + "work-phone": None, + }, + ], + "type": "users", + }, + ], + ) plugin.stop() users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] @@ -51,28 +66,34 @@ helpers = [MonitorHelper] def setUp(self): - super(UserMonitorTest, self).setUp() + super().setUp() self.shadow_file = self.makeFile( "jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7:::\n" "psmith:!:13348:0:99999:7:::\n" - "sam:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7:::\n") + "sam:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7:::\n", + ) self.user_manager = UserManager(shadow_file=self.shadow_file) - self.publisher = ComponentPublisher(self.user_manager, self.reactor, - self.config) + self.publisher = ComponentPublisher( + self.user_manager, + self.reactor, + self.config, + ) self.publisher.start() self.provider = FakeUserProvider() self.plugin = UserMonitor(self.provider) # Part of bug 1048576 remediation: self._original_USER_UPDATE_FLAG_FILE = ( - landscape.client.monitor.usermonitor.USER_UPDATE_FLAG_FILE) + landscape.client.monitor.usermonitor.USER_UPDATE_FLAG_FILE + ) def tearDown(self): self.publisher.stop() self.plugin.stop() # Part of bug 1048576 remediation: landscape.client.monitor.usermonitor.USER_UPDATE_FLAG_FILE = ( - self._original_USER_UPDATE_FLAG_FILE) - return super(UserMonitorTest, self).tearDown() + self._original_USER_UPDATE_FLAG_FILE + ) + return super().tearDown() def test_constants(self): """ @@ -89,8 +110,9 @@ L{UserChange} snapshots should be cleared and a new message with users generated. """ - self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", - "/home/jdoe", "/bin/sh")] + self.provider.users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.broker_service.message_store.set_accepted_types(["users"]) self.monitor.add(self.plugin) @@ -99,36 +121,64 @@ self.assertTrue(persist.get("users")) self.assertTrue(persist.get("groups")) self.assertMessages( - self.broker_service.message_store.get_pending_messages(), - [{"create-group-members": {u"webdev": [u"jdoe"]}, - "create-groups": [{"gid": 1000, "name": u"webdev"}], - "create-users": [{"enabled": True, "home-phone": None, - "location": None, "name": u"JD", - "primary-gid": 1000, "uid": 1000, - "username": u"jdoe", "work-phone": None}], - "type": "users"}]) + self.broker_service.message_store.get_pending_messages(), + [ + { + "create-group-members": {"webdev": ["jdoe"]}, + "create-groups": [{"gid": 1000, "name": "webdev"}], + "create-users": [ + { + "enabled": True, + "home-phone": None, + "location": None, + "name": "JD", + "primary-gid": 1000, + "uid": 1000, + "username": "jdoe", + "work-phone": None, + }, + ], + "type": "users", + }, + ], + ) self.broker_service.message_store.delete_all_messages() deferred = self.monitor.reactor.fire( - "resynchronize", scopes=["users"])[0] + "resynchronize", + scopes=["users"], + )[0] self.successResultOf(deferred) self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"create-group-members": {u"webdev": [u"jdoe"]}, - "create-groups": [{"gid": 1000, "name": u"webdev"}], - "create-users": [{"enabled": True, "home-phone": None, - "location": None, "name": u"JD", - "primary-gid": 1000, "uid": 1000, - "username": u"jdoe", - "work-phone": None}], - "type": "users"}]) + [ + { + "create-group-members": {"webdev": ["jdoe"]}, + "create-groups": [{"gid": 1000, "name": "webdev"}], + "create-users": [ + { + "enabled": True, + "home-phone": None, + "location": None, + "name": "JD", 
+ "primary-gid": 1000, + "uid": 1000, + "username": "jdoe", + "work-phone": None, + }, + ], + "type": "users", + }, + ], + ) def test_new_message_after_resynchronize_event(self): """ When a 'resynchronize' reactor event is fired, a new session is created and the UserMonitor creates a new message. """ - self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", - "/home/jdoe", "/bin/sh")] + self.provider.users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.broker_service.message_store.set_accepted_types(["users"]) self.monitor.add(self.plugin) @@ -137,21 +187,35 @@ self.successResultOf(deferred) self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"create-group-members": {u"webdev": [u"jdoe"]}, - "create-groups": [{"gid": 1000, "name": u"webdev"}], - "create-users": [{"enabled": True, "home-phone": None, - "location": None, "name": u"JD", - "primary-gid": 1000, "uid": 1000, - "username": u"jdoe", "work-phone": None}], - "type": "users"}]) + [ + { + "create-group-members": {"webdev": ["jdoe"]}, + "create-groups": [{"gid": 1000, "name": "webdev"}], + "create-users": [ + { + "enabled": True, + "home-phone": None, + "location": None, + "name": "JD", + "primary-gid": 1000, + "uid": 1000, + "username": "jdoe", + "work-phone": None, + }, + ], + "type": "users", + }, + ], + ) def test_wb_resynchronize_event_with_global_scope(self): """ When a C{resynchronize} event, with global scope, occurs we act exactly as if it had 'users' scope. """ - self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", - "/home/jdoe", "/bin/sh")] + self.provider.users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.broker_service.message_store.set_accepted_types(["users"]) self.monitor.add(self.plugin) @@ -160,35 +224,61 @@ self.assertTrue(persist.get("users")) self.assertTrue(persist.get("groups")) self.assertMessages( - self.broker_service.message_store.get_pending_messages(), - [{"create-group-members": {u"webdev": [u"jdoe"]}, - "create-groups": [{"gid": 1000, "name": u"webdev"}], - "create-users": [{"enabled": True, "home-phone": None, - "location": None, "name": u"JD", - "primary-gid": 1000, "uid": 1000, - "username": u"jdoe", "work-phone": None}], - "type": "users"}]) + self.broker_service.message_store.get_pending_messages(), + [ + { + "create-group-members": {"webdev": ["jdoe"]}, + "create-groups": [{"gid": 1000, "name": "webdev"}], + "create-users": [ + { + "enabled": True, + "home-phone": None, + "location": None, + "name": "JD", + "primary-gid": 1000, + "uid": 1000, + "username": "jdoe", + "work-phone": None, + }, + ], + "type": "users", + }, + ], + ) self.broker_service.message_store.delete_all_messages() deferred = self.monitor.reactor.fire("resynchronize")[0] self.successResultOf(deferred) self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"create-group-members": {u"webdev": [u"jdoe"]}, - "create-groups": [{"gid": 1000, "name": u"webdev"}], - "create-users": [{"enabled": True, "home-phone": None, - "location": None, "name": u"JD", - "primary-gid": 1000, "uid": 1000, - "username": u"jdoe", - "work-phone": None}], - "type": "users"}]) + [ + { + "create-group-members": {"webdev": ["jdoe"]}, + "create-groups": [{"gid": 1000, "name": "webdev"}], + "create-users": [ + { + "enabled": True, + "home-phone": None, + "location": None, + "name": "JD", + "primary-gid": 1000, + "uid": 
1000, + "username": "jdoe", + "work-phone": None, + }, + ], + "type": "users", + }, + ], + ) def test_do_not_resynchronize_with_other_scope(self): """ When a C{resynchronize} event, with an irrelevant scope, occurs we do nothing. """ - self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", - "/home/jdoe", "/bin/sh")] + self.provider.users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.broker_service.message_store.set_accepted_types(["users"]) self.monitor.add(self.plugin) @@ -197,19 +287,33 @@ self.assertTrue(persist.get("users")) self.assertTrue(persist.get("groups")) self.assertMessages( - self.broker_service.message_store.get_pending_messages(), - [{"create-group-members": {u"webdev": [u"jdoe"]}, - "create-groups": [{"gid": 1000, "name": u"webdev"}], - "create-users": [{"enabled": True, "home-phone": None, - "location": None, "name": u"JD", - "primary-gid": 1000, "uid": 1000, - "username": u"jdoe", "work-phone": None}], - "type": "users"}]) + self.broker_service.message_store.get_pending_messages(), + [ + { + "create-group-members": {"webdev": ["jdoe"]}, + "create-groups": [{"gid": 1000, "name": "webdev"}], + "create-users": [ + { + "enabled": True, + "home-phone": None, + "location": None, + "name": "JD", + "primary-gid": 1000, + "uid": 1000, + "username": "jdoe", + "work-phone": None, + }, + ], + "type": "users", + }, + ], + ) self.broker_service.message_store.delete_all_messages() self.monitor.reactor.fire("resynchronize", scopes=["disk"])[0] self.assertMessages( self.broker_service.message_store.get_pending_messages(), - []) + [], + ) def test_run(self): """ @@ -221,16 +325,30 @@ def got_result(result): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"create-group-members": {u"webdev": [u"jdoe"]}, - "create-groups": [{"gid": 1000, "name": u"webdev"}], - "create-users": [{"enabled": True, "home-phone": None, - "location": None, "name": u"JD", - "primary-gid": 1000, "uid": 1000, - "username": u"jdoe", "work-phone": None}], - "type": "users"}]) - - self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", - "/home/jdoe", "/bin/sh")] + [ + { + "create-group-members": {"webdev": ["jdoe"]}, + "create-groups": [{"gid": 1000, "name": "webdev"}], + "create-users": [ + { + "enabled": True, + "home-phone": None, + "location": None, + "name": "JD", + "primary-gid": 1000, + "uid": 1000, + "username": "jdoe", + "work-phone": None, + }, + ], + "type": "users", + }, + ], + ) + + self.provider.users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.broker_service.message_store.set_accepted_types(["users"]) self.monitor.add(self.plugin) @@ -262,17 +380,31 @@ def got_result(result): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"create-group-members": {u"webdev": [u"jdoe"]}, - "create-groups": [{"gid": 1000, "name": u"webdev"}], - "create-users": [{"enabled": True, "home-phone": None, - "location": None, "name": u"JD", - "primary-gid": 1000, "uid": 1000, - "username": u"jdoe", "work-phone": None}], - "operation-id": 1001, - "type": "users"}]) - - self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", - "/home/jdoe", "/bin/sh")] + [ + { + "create-group-members": {"webdev": ["jdoe"]}, + "create-groups": [{"gid": 1000, "name": "webdev"}], + "create-users": [ + { + "enabled": True, + "home-phone": None, + "location": None, + "name": "JD", + "primary-gid": 
1000, + "uid": 1000, + "username": "jdoe", + "work-phone": None, + }, + ], + "operation-id": 1001, + "type": "users", + }, + ], + ) + + self.provider.users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.monitor.add(self.plugin) self.broker_service.message_store.set_accepted_types(["users"]) @@ -281,21 +413,34 @@ return result def test_detect_changes(self): - def got_result(result): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"create-group-members": {u"webdev": [u"jdoe"]}, - "create-groups": [{"gid": 1000, "name": u"webdev"}], - "create-users": [{"enabled": True, "home-phone": None, - "location": None, "name": u"JD", - "primary-gid": 1000, "uid": 1000, - "username": u"jdoe", "work-phone": None}], - "type": "users"}]) - - self.broker_service.message_store.set_accepted_types(["users"]) - self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", - "/home/jdoe", "/bin/sh")] + [ + { + "create-group-members": {"webdev": ["jdoe"]}, + "create-groups": [{"gid": 1000, "name": "webdev"}], + "create-users": [ + { + "enabled": True, + "home-phone": None, + "location": None, + "name": "JD", + "primary-gid": 1000, + "uid": 1000, + "username": "jdoe", + "work-phone": None, + }, + ], + "type": "users", + }, + ], + ) + + self.broker_service.message_store.set_accepted_types(["users"]) + self.provider.users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.monitor.add(self.plugin) @@ -315,18 +460,32 @@ def got_result(result): self.assertMessages( self.broker_service.message_store.get_pending_messages(), - [{"create-group-members": {u"webdev": [u"jdoe"]}, - "create-groups": [{"gid": 1000, "name": u"webdev"}], - "create-users": [{"enabled": True, "home-phone": None, - "location": None, "name": u"JD", - "primary-gid": 1000, "uid": 1000, - "username": u"jdoe", "work-phone": None}], - "operation-id": 1001, - "type": "users"}]) - - self.broker_service.message_store.set_accepted_types(["users"]) - self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", - "/home/jdoe", "/bin/sh")] + [ + { + "create-group-members": {"webdev": ["jdoe"]}, + "create-groups": [{"gid": 1000, "name": "webdev"}], + "create-users": [ + { + "enabled": True, + "home-phone": None, + "location": None, + "name": "JD", + "primary-gid": 1000, + "uid": 1000, + "username": "jdoe", + "work-phone": None, + }, + ], + "operation-id": 1001, + "type": "users", + }, + ], + ) + + self.broker_service.message_store.set_accepted_types(["users"]) + self.provider.users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.monitor.add(self.plugin) connector = RemoteUserMonitorConnector(self.reactor, self.config) @@ -344,7 +503,7 @@ """ self.monitor.add(self.plugin) - class FauxUserChanges(object): + class FauxUserChanges: cleared = False def __init__(self, *args): @@ -357,13 +516,14 @@ self.__class__.cleared = True # Create the (temporary, test) user update flag file. - landscape.client.monitor.usermonitor.USER_UPDATE_FLAG_FILE = \ - update_flag_file = self.makeFile("") + landscape.client.monitor.usermonitor.USER_UPDATE_FLAG_FILE = ( + update_flag_file + ) = self.makeFile("") self.addCleanup(lambda: os.remove(update_flag_file)) # Trigger a detect changes. 
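# Annotation (not part of the patch): the hunk here renames the injected
# test-double keyword from UserChanges to userchanges (PEP 8 style for
# arguments) and reworks the chained assignment that both patches the
# module-level USER_UPDATE_FLAG_FILE and keeps the temporary path for
# cleanup. A minimal, self-contained sketch of the same injection pattern,
# using hypothetical names rather than the real landscape-client classes:

class RealUserChanges:
    """Stand-in for the production collaborator."""

    def __init__(self, persist, provider):
        self.persist = persist
        self.provider = provider

    def clear(self):
        pass  # the real class would drop its cached snapshot here


class UserChangesStub(RealUserChanges):
    cleared = False

    def clear(self):
        type(self).cleared = True  # record the call for the assertion


def detect_changes(persist, provider, userchanges=RealUserChanges):
    # Tests inject a stub class via the keyword instead of monkey-patching
    # the module; production callers just use the default.
    changes = userchanges(persist, provider)
    changes.clear()
    return changes


detect_changes(None, None, userchanges=UserChangesStub)
assert UserChangesStub.cleared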
self.plugin._persist = None - self.plugin._detect_changes([], UserChanges=FauxUserChanges) + self.plugin._detect_changes([], userchanges=FauxUserChanges) # The clear() method was called. self.assertTrue(FauxUserChanges.cleared) @@ -379,12 +539,14 @@ self.assertFalse(os.path.exists(update_flag_file)) # Create the (temporary, test) user update flag file. - landscape.client.monitor.usermonitor.USER_UPDATE_FLAG_FILE = \ - update_flag_file = self.makeFile("") + landscape.client.monitor.usermonitor.USER_UPDATE_FLAG_FILE = ( + update_flag_file + ) = self.makeFile("") self.broker_service.message_store.set_accepted_types(["users"]) - self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", - "/home/jdoe", "/bin/sh")] + self.provider.users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ] self.provider.groups = [] self.monitor.add(self.plugin) @@ -408,8 +570,9 @@ self.assertMessages(list(mstore.get_pending_messages()), []) self.broker_service.message_store.set_accepted_types([]) - self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", - "/home/jdoe", "/bin/sh")] + self.provider.users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.monitor.add(self.plugin) connector = RemoteUserMonitorConnector(self.reactor, self.config) @@ -420,27 +583,42 @@ return result def test_call_on_accepted(self): - def got_result(result): mstore = self.broker_service.message_store self.assertMessages( mstore.get_pending_messages(), - [{"create-group-members": {u"webdev": [u"jdoe"]}, - "create-groups": [{"gid": 1000, "name": u"webdev"}], - "create-users": [{"enabled": True, "home-phone": None, - "location": None, "name": u"JD", - "primary-gid": 1000, "uid": 1000, - "username": u"jdoe", "work-phone": None}], - "type": "users"}]) - - self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", - "/home/jdoe", "/bin/sh")] + [ + { + "create-group-members": {"webdev": ["jdoe"]}, + "create-groups": [{"gid": 1000, "name": "webdev"}], + "create-users": [ + { + "enabled": True, + "home-phone": None, + "location": None, + "name": "JD", + "primary-gid": 1000, + "uid": 1000, + "username": "jdoe", + "work-phone": None, + }, + ], + "type": "users", + }, + ], + ) + + self.provider.users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.monitor.add(self.plugin) self.broker_service.message_store.set_accepted_types(["users"]) result = self.reactor.fire( - ("message-type-acceptance-changed", "users"), True) + ("message-type-acceptance-changed", "users"), + True, + ) result = [x for x in result if x][0] result.addCallback(got_result) return result @@ -463,11 +641,13 @@ self.broker_service.message_store.set_accepted_types(["users"]) - self.monitor.broker.send_message = Mock(return_value=fail( - RuntimeError())) - - self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", - "/home/jdoe", "/bin/sh")] + self.monitor.broker.send_message = Mock( + return_value=fail(RuntimeError()), + ) + + self.provider.users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.monitor.add(self.plugin) connector = RemoteUserMonitorConnector(self.reactor, self.config) @@ -476,4 +656,7 @@ result.addCallback(got_result) result.addCallback(lambda x: connector.disconnect()) self.monitor.broker.send_message.assert_called_once_with( - ANY, ANY, urgent=True) + ANY, + ANY, + urgent=True, + ) diff -Nru 
landscape-client-23.02/landscape/client/monitor/ubuntuproinfo.py landscape-client-23.08/landscape/client/monitor/ubuntuproinfo.py
--- landscape-client-23.02/landscape/client/monitor/ubuntuproinfo.py 2023-02-07 18:55:50.000000000 +0000
+++ landscape-client-23.08/landscape/client/monitor/ubuntuproinfo.py 2023-08-17 21:19:32.000000000 +0000
@@ -32,15 +32,19 @@
     try:
         completed_process = subprocess.run(
             ["ua", "status", "--format", "json"],
-            encoding="utf8", stdout=subprocess.PIPE)
+            encoding="utf8",
+            stdout=subprocess.PIPE,
+        )
     except FileNotFoundError:
         return {
-            "errors": [{
-                "message": "ubuntu-advantage-tools not found.",
-                "message_code": "tools-error",
-                "service": None,
-                "type": "system",
-            }],
+            "errors": [
+                {
+                    "message": "ubuntu-advantage-tools not found.",
+                    "message_code": "tools-error",
+                    "service": None,
+                    "type": "system",
+                },
+            ],
             "result": "failure",
         }
     else:
diff -Nru landscape-client-23.02/landscape/client/monitor/ubuntuprorebootrequired.py landscape-client-23.08/landscape/client/monitor/ubuntuprorebootrequired.py
--- landscape-client-23.02/landscape/client/monitor/ubuntuprorebootrequired.py 1970-01-01 00:00:00.000000000 +0000
+++ landscape-client-23.08/landscape/client/monitor/ubuntuprorebootrequired.py 2023-08-17 21:19:32.000000000 +0000
@@ -0,0 +1,27 @@
+from uaclient.api.u.pro.security.status.reboot_required.v1 import (
+    reboot_required,
+)
+
+from landscape.client.monitor.plugin import DataWatcher
+
+
+class UbuntuProRebootRequired(DataWatcher):
+    """
+    Plugin that captures and reports from Ubuntu Pro API if the system needs to
+    be rebooted. The `uaclient` Python API should be installed by default.
+    """
+
+    message_type = "ubuntu-pro-reboot-required"
+    message_key = message_type
+    persist_name = message_type
+    scope = "ubuntu-pro"
+    run_immediately = True
+    run_interval = 900  # 15 minutes
+
+    def get_data(self):
+        """
+        Return the JSON formatted output of "reboot-required" from Ubuntu Pro
+        API.
+        """
+
+        return reboot_required().to_json()
diff -Nru landscape-client-23.02/landscape/client/monitor/updatemanager.py landscape-client-23.08/landscape/client/monitor/updatemanager.py
--- landscape-client-23.02/landscape/client/monitor/updatemanager.py 2023-02-07 18:55:50.000000000 +0000
+++ landscape-client-23.08/landscape/client/monitor/updatemanager.py 2023-08-17 21:19:32.000000000 +0000
@@ -1,8 +1,8 @@
-import os
 import logging
+import os

-from landscape.lib.compat import SafeConfigParser
 from landscape.client.monitor.plugin import MonitorPlugin
+from landscape.lib.compat import SafeConfigParser


 class UpdateManager(MonitorPlugin):
@@ -45,10 +45,10 @@
         valid_prompts = ["lts", "never", "normal"]
         if prompt not in valid_prompts:
             prompt = "normal"
-            message = ("%s contains invalid Prompt value. "
-                       "Should be one of %s." % (
-                           self.update_manager_filename,
-                           valid_prompts))
+            message = (
+                f"{self.update_manager_filename} contains invalid Prompt "
+                f"value. Should be one of {valid_prompts}."
+ ) logging.warning(message) return prompt @@ -60,11 +60,8 @@ if prompt == self._persist.get("prompt"): return self._persist.set("prompt", prompt) - message = { - "type": "update-manager-info", - "prompt": prompt} - logging.info("Queueing message with updated " - "update-manager status.") + message = {"type": "update-manager-info", "prompt": prompt} + logging.info("Queueing message with updated " "update-manager status.") return self.registry.broker.send_message(message, self._session_id) def run(self): @@ -72,4 +69,6 @@ Send the update-manager-info messages, if the server accepts them. """ return self.registry.broker.call_if_accepted( - "update-manager-info", self.send_message) + "update-manager-info", + self.send_message, + ) diff -Nru landscape-client-23.02/landscape/client/monitor/usermonitor.py landscape-client-23.08/landscape/client/monitor/usermonitor.py --- landscape-client-23.02/landscape/client/monitor/usermonitor.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/monitor/usermonitor.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,15 +1,15 @@ import logging -import os import os.path from twisted.internet.defer import maybeDeferred -from landscape.lib.log import log_failure -from landscape.client.amp import ComponentPublisher, ComponentConnector, remote - +from landscape.client.amp import ComponentConnector +from landscape.client.amp import ComponentPublisher +from landscape.client.amp import remote from landscape.client.monitor.plugin import MonitorPlugin from landscape.client.user.changes import UserChanges from landscape.client.user.provider import UserProvider +from landscape.lib.log import log_failure # Part of bug 1048576 remediation: @@ -33,12 +33,15 @@ self._publisher = None def register(self, registry): - super(UserMonitor, self).register(registry) + super().register(registry) self.call_on_accepted("users", self._run_detect_changes, None) - self._publisher = ComponentPublisher(self, self.registry.reactor, - self.registry.config) + self._publisher = ComponentPublisher( + self, + self.registry.reactor, + self.registry.config, + ) self._publisher.start() def stop(self): @@ -49,7 +52,7 @@ def _resynchronize(self, scopes=None): """Reset user and group data.""" - deferred = super(UserMonitor, self)._resynchronize(scopes=scopes) + deferred = super()._resynchronize(scopes=scopes) # Wait for the superclass' asynchronous _resynchronize method to # complete, so we have a new session ID at hand and we can craft a # valid message (l.broker.client.BrokerClientPlugin._resynchronize). @@ -59,7 +62,10 @@ @remote def detect_changes(self, operation_id=None): return self.registry.broker.call_if_accepted( - "users", self._run_detect_changes, operation_id) + "users", + self._run_detect_changes, + operation_id, + ) run = detect_changes @@ -72,14 +78,17 @@ C{operation-id} field. """ from landscape.client.manager.usermanager import ( - RemoteUserManagerConnector) + RemoteUserManagerConnector, + ) + user_manager_connector = RemoteUserManagerConnector( - self.registry.reactor, self.registry.config) + self.registry.reactor, + self.registry.config, + ) # We'll skip checking the locked users if we're in monitor-only mode. 
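# Annotation (not part of the patch): detect_changes() in the usermonitor
# hunk above is exposed over AMP via @remote and is gated on the server
# accepting "users" messages through broker.call_if_accepted(). A
# simplified, synchronous sketch of that gating semantics; the real broker
# returns Twisted deferreds, and FakeBroker here is hypothetical:

class FakeBroker:
    def __init__(self, accepted_types):
        self.accepted_types = set(accepted_types)

    def call_if_accepted(self, message_type, callback, *args):
        # Run the callback only when the server accepts this message type;
        # otherwise the work is skipped entirely.
        if message_type in self.accepted_types:
            return callback(*args)
        return None


broker = FakeBroker(accepted_types={"users"})
seen = []
broker.call_if_accepted("users", seen.append, 1001)  # runs
broker.call_if_accepted("snaps", seen.append, 1002)  # skipped
assert seen == [1001]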
if getattr(self.registry.config, "monitor_only", False): - result = maybeDeferred(self._detect_changes, - [], operation_id) + result = maybeDeferred(self._detect_changes, [], operation_id) else: def get_locked_usernames(user_manager): @@ -96,19 +105,24 @@ result.addErrback(lambda f: self._detect_changes([], operation_id)) return result - def _detect_changes(self, locked_users, operation_id=None, - UserChanges=UserChanges): - + def _detect_changes( + self, + locked_users, + operation_id=None, + userchanges=UserChanges, + ): def update_snapshot(result): changes.snapshot() return result def log_error(result): - log_failure(result, "Error occured calling send_message in " - "_detect_changes") + log_failure( + result, + "Error occured calling send_message in _detect_changes", + ) self._provider.locked_users = locked_users - changes = UserChanges(self._persist, self._provider) + changes = userchanges(self._persist, self._provider) # Part of bug 1048576 remediation: If the flag file exists, we need to # do a full update of user data. @@ -132,7 +146,10 @@ if operation_id: message["operation-id"] = operation_id result = self.registry.broker.send_message( - message, self._session_id, urgent=True) + message, + self._session_id, + urgent=True, + ) result.addCallback(update_snapshot) # Part of bug 1048576 remediation: @@ -161,7 +178,9 @@ This is part of the bug 1048576 remediation. """ return os.path.join( - self.registry.config.data_path, USER_UPDATE_FLAG_FILE) + self.registry.config.data_path, + USER_UPDATE_FLAG_FILE, + ) class RemoteUserMonitorConnector(ComponentConnector): diff -Nru landscape-client-23.02/landscape/client/package/changer.py landscape-client-23.08/landscape/client/package/changer.py --- landscape-client-23.02/landscape/client/package/changer.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/package/changer.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,28 +1,34 @@ +import grp import logging -import time import os import pwd -import grp +import time -from twisted.internet.defer import maybeDeferred, succeed from twisted.internet import reactor +from twisted.internet.defer import maybeDeferred +from twisted.internet.defer import succeed -from landscape.constants import ( - SUCCESS_RESULT, ERROR_RESULT, DEPENDENCY_ERROR_RESULT, - POLICY_STRICT, POLICY_ALLOW_INSTALLS, POLICY_ALLOW_ALL_CHANGES, - UNKNOWN_PACKAGE_DATA_TIMEOUT) - -from landscape.lib.config import get_bindir -from landscape.lib import base64 -from landscape.lib.fs import create_binary_file -from landscape.lib.log import log_failure -from landscape.client.package.reporter import find_reporter_command -from landscape.client.package.taskhandler import ( - PackageTaskHandler, PackageTaskHandlerConfiguration, PackageTaskError, - run_task_handler) from landscape.client.manager.manager import FAILED from landscape.client.manager.shutdownmanager import ShutdownProcessProtocol from landscape.client.monitor.rebootrequired import REBOOT_REQUIRED_FILENAME +from landscape.client.package.reporter import find_reporter_command +from landscape.client.package.taskhandler import PackageTaskError +from landscape.client.package.taskhandler import PackageTaskHandler +from landscape.client.package.taskhandler import ( + PackageTaskHandlerConfiguration, +) +from landscape.client.package.taskhandler import run_task_handler +from landscape.constants import DEPENDENCY_ERROR_RESULT +from landscape.constants import ERROR_RESULT +from landscape.constants import POLICY_ALLOW_ALL_CHANGES +from landscape.constants import 
POLICY_ALLOW_INSTALLS +from landscape.constants import POLICY_STRICT +from landscape.constants import SUCCESS_RESULT +from landscape.constants import UNKNOWN_PACKAGE_DATA_TIMEOUT +from landscape.lib import base64 +from landscape.lib.config import get_bindir +from landscape.lib.fs import create_binary_file +from landscape.lib.log import log_failure class UnknownPackageData(Exception): @@ -38,7 +44,7 @@ return os.path.join(self.package_directory, "binaries") -class ChangePackagesResult(object): +class ChangePackagesResult: """Value object to hold the results of change packages operation. @ivar code: The result code of the requested changes. @@ -63,14 +69,27 @@ queue_name = "changer" - def __init__(self, store, facade, remote, config, process_factory=reactor, - landscape_reactor=None, - reboot_required_filename=REBOOT_REQUIRED_FILENAME): - super(PackageChanger, self).__init__( - store, facade, remote, config, landscape_reactor) + def __init__( + self, + store, + facade, + remote, + config, + process_factory=reactor, + landscape_reactor=None, + reboot_required_filename=REBOOT_REQUIRED_FILENAME, + ): + super().__init__( + store, + facade, + remote, + config, + landscape_reactor, + ) self._process_factory = process_factory if landscape_reactor is None: # For testing purposes. from landscape.client.reactor import LandscapeReactor + self._landscape_reactor = LandscapeReactor() else: self._landscape_reactor = landscape_reactor @@ -103,7 +122,7 @@ os.setuid(pwd.getpwnam("landscape").pw_uid) command = find_reporter_command(self._config) if self._config.config is not None: - command += " -c %s" % self._config.config + command += f" -c {self._config.config}" os.system(command) def handle_task(self, task): @@ -129,14 +148,18 @@ up again at the next run. """ failure.trap(UnknownPackageData) - logging.warning("Package data not yet synchronized with server (%r)" % - failure.value.args[0]) + logging.warning( + "Package data not yet synchronized with " + f"server ({failure.value.args[0]!r})", + ) if task.timestamp < time.time() - UNKNOWN_PACKAGE_DATA_TIMEOUT: - message = {"type": "change-packages-result", - "operation-id": task.data["operation-id"], - "result-code": ERROR_RESULT, - "result-text": "Package data has changed. " - "Please retry the operation."} + message = { + "type": "change-packages-result", + "operation-id": task.data["operation-id"], + "result-code": ERROR_RESULT, + "result-text": "Package data has changed. " + "Please retry the operation.", + } return self._broker.send_message(message, self._session_id) else: raise PackageTaskError() @@ -145,8 +168,9 @@ """ Return a boolean indicating if the update-stamp stamp file exists. """ - return (os.path.exists(self._config.update_stamp_filename) or - os.path.exists(self.update_notifier_stamp)) + return os.path.exists( + self._config.update_stamp_filename, + ) or os.path.exists(self.update_notifier_stamp) def _clear_binaries(self): """Remove any binaries and its associated channel.""" @@ -161,8 +185,9 @@ """Initialize the Apt channels as needed. @param binaries: A possibly empty list of 3-tuples of the form - (hash, id, deb), holding the hash, the id and the content of - additional Debian packages that should be loaded in the channels. + (bin_hash, bin_id, deb), holding the hash, the id and the content + of additional Debian packages that should be loaded in the + channels. 
""" binaries_path = self._config.binaries_path @@ -171,18 +196,27 @@ if binaries: hash_ids = {} - for hash, id, deb in binaries: - create_binary_file(os.path.join(binaries_path, "%d.deb" % id), - base64.decodebytes(deb)) - hash_ids[hash] = id + for bin_hash, bin_id, deb in binaries: + create_binary_file( + os.path.join(binaries_path, f"{bin_id:d}.deb"), + base64.decodebytes(deb), + ) + hash_ids[bin_hash] = bin_id self._store.set_hash_ids(hash_ids) self._facade.add_channel_deb_dir(binaries_path) self._facade.reload_channels(force_reload_binaries=True) self._facade.ensure_channels_reloaded() - def mark_packages(self, upgrade=False, install=(), remove=(), - hold=(), remove_hold=(), reset=True): + def mark_packages( + self, + upgrade=False, + install=(), + remove=(), + hold=(), + remove_hold=(), + reset=True, + ): """Mark packages for upgrade, installation or removal. @param upgrade: If C{True} mark all installed packages for upgrade. @@ -200,10 +234,11 @@ self._facade.mark_global_upgrade() for mark_function, mark_ids in [ - (self._facade.mark_install, install), - (self._facade.mark_remove, remove), - (self._facade.mark_hold, hold), - (self._facade.mark_remove_hold, remove_hold)]: + (self._facade.mark_install, install), + (self._facade.mark_remove, remove), + (self._facade.mark_hold, hold), + (self._facade.mark_remove_hold, remove_hold), + ]: for mark_id in mark_ids: hash = self._store.get_id_hash(mark_id) if hash is None: @@ -225,7 +260,9 @@ # Delay importing these so that we don't import Apt unless # we really need to. from landscape.lib.apt.package.facade import ( - DependencyError, TransactionError) + DependencyError, + TransactionError, + ) result = ChangePackagesResult() count = 0 @@ -252,15 +289,18 @@ result.installs.append(id) if count == 1 and self.may_complement_changes(result, policy): # Mark all missing packages and try one more iteration - self.mark_packages(install=result.installs, - remove=result.removals, reset=False) + self.mark_packages( + install=result.installs, + remove=result.removals, + reset=False, + ) else: result.code = DEPENDENCY_ERROR_RESULT else: result.code = SUCCESS_RESULT if result.code == SUCCESS_RESULT and result.text is None: - result.text = 'No changes required; all changes already performed' + result.text = "No changes required; all changes already performed" return result def may_complement_changes(self, result, policy): @@ -288,20 +328,27 @@ """Handle a C{change-packages} message.""" self.init_channels(message.get("binaries", ())) - self.mark_packages(upgrade=message.get("upgrade-all", False), - install=message.get("install", ()), - remove=message.get("remove", ()), - hold=message.get("hold", ()), - remove_hold=message.get("remove-hold", ())) + self.mark_packages( + upgrade=message.get("upgrade-all", False), + install=message.get("install", ()), + remove=message.get("remove", ()), + hold=message.get("hold", ()), + remove_hold=message.get("remove-hold", ()), + ) result = self.change_packages(message.get("policy", POLICY_STRICT)) self._clear_binaries() - needs_reboot = (message.get("reboot-if-necessary") and - os.path.exists(self.reboot_required_filename)) + needs_reboot = message.get("reboot-if-necessary") and os.path.exists( + self.reboot_required_filename, + ) stop_exchanger = needs_reboot - deferred = self._send_response(None, message, result, - stop_exchanger=stop_exchanger) + deferred = self._send_response( + None, + message, + result, + stop_exchanger=stop_exchanger, + ) if needs_reboot: # Reboot the system after a short delay after the response has 
been # sent to the broker. This is to allow the broker time to save the @@ -327,24 +374,39 @@ protocol.set_timeout(self._landscape_reactor) protocol.result.addCallback(self._log_reboot, minutes) protocol.result.addErrback(log_failure, "Reboot failed.") - args = ["/sbin/shutdown", "-r", minutes, - "Landscape is rebooting the system"] + args = [ + "/sbin/shutdown", + "-r", + minutes, + "Landscape is rebooting the system", + ] self._process_factory.spawnProcess( - protocol, "/sbin/shutdown", args=args) + protocol, + "/sbin/shutdown", + args=args, + ) return protocol.result def _log_reboot(self, result, minutes): """Log the reboot.""" logging.warning( - "Landscape is rebooting the system in %s minutes" % minutes) + f"Landscape is rebooting the system in {minutes} minutes", + ) - def _send_response(self, reboot_result, message, package_change_result, - stop_exchanger=False): + def _send_response( + self, + reboot_result, + message, + package_change_result, + stop_exchanger=False, + ): """ Create a response and dispatch to the broker. """ - response = {"type": "change-packages-result", - "operation-id": message.get("operation-id")} + response = { + "type": "change-packages-result", + "operation-id": message.get("operation-id"), + } response["result-code"] = package_change_result.code if package_change_result.text: @@ -354,8 +416,10 @@ if package_change_result.removals: response["must-remove"] = sorted(package_change_result.removals) - logging.info("Queuing response with change package results to " - "exchange urgently.") + logging.info( + "Queuing response with change package results to " + "exchange urgently.", + ) deferred = self._broker.send_message(response, self._session_id, True) if stop_exchanger: @@ -374,7 +438,8 @@ "operation-id": message.get("operation-id"), "status": FAILED, "result-text": "This client doesn't support package locks.", - "result-code": 1} + "result-code": 1, + } return self._broker.send_message(response, self._session_id, True) @classmethod diff -Nru landscape-client-23.02/landscape/client/package/releaseupgrader.py landscape-client-23.08/landscape/client/package/releaseupgrader.py --- landscape-client-23.02/landscape/client/package/releaseupgrader.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/package/releaseupgrader.py 2023-08-17 21:19:32.000000000 +0000 @@ -8,16 +8,22 @@ from twisted.internet.defer import succeed +from landscape.client.manager.manager import FAILED +from landscape.client.manager.manager import SUCCEEDED +from landscape.client.package.reporter import find_reporter_command +from landscape.client.package.taskhandler import PackageTaskHandler +from landscape.client.package.taskhandler import ( + PackageTaskHandlerConfiguration, +) +from landscape.client.package.taskhandler import run_task_handler from landscape.lib.config import get_bindir -from landscape.lib.fetch import url_to_filename, fetch_to_files -from landscape.lib.lsb_release import parse_lsb_release, LSB_RELEASE_FILENAME -from landscape.lib.gpg import gpg_verify +from landscape.lib.fetch import fetch_to_files +from landscape.lib.fetch import url_to_filename from landscape.lib.fs import read_text_file -from landscape.client.package.taskhandler import ( - PackageTaskHandlerConfiguration, PackageTaskHandler, run_task_handler) +from landscape.lib.gpg import gpg_verify +from landscape.lib.lsb_release import LSB_RELEASE_FILENAME +from landscape.lib.lsb_release import parse_lsb_release from landscape.lib.twisted_util import spawn_process -from 
landscape.client.manager.manager import SUCCEEDED, FAILED -from landscape.client.package.reporter import find_reporter_command class ReleaseUpgraderConfiguration(PackageTaskHandlerConfiguration): @@ -54,11 +60,13 @@ def make_operation_result_message(self, operation_id, status, text, code): """Convenience to create messages of type C{"operation-result"}.""" - return {"type": "operation-result", - "operation-id": operation_id, - "status": status, - "result-text": text, - "result-code": code} + return { + "type": "operation-result", + "operation-id": operation_id, + "status": status, + "result-text": text, + "result-code": code, + } def handle_task(self, task): """Call the proper handler for the given C{task}.""" @@ -78,10 +86,15 @@ if target_code_name == current_code_name: message = self.make_operation_result_message( - operation_id, FAILED, - "The system is already running %s." % target_code_name, 1) - logging.info("Queuing message with release upgrade failure to " - "exchange urgently.") + operation_id, + FAILED, + f"The system is already running {target_code_name}.", + 1, + ) + logging.info( + "Queuing message with release upgrade failure to " + "exchange urgently.", + ) return self._send_message(message) tarball_url = message["upgrade-tool-tarball-url"] @@ -89,19 +102,26 @@ allow_third_party = message.get("allow-third-party", False) debug = message.get("debug", False) directory = self._config.upgrade_tool_directory - tarball_filename = url_to_filename(tarball_url, - directory=directory) - signature_filename = url_to_filename(signature_url, - directory=directory) + tarball_filename = url_to_filename(tarball_url, directory=directory) + signature_filename = url_to_filename( + signature_url, + directory=directory, + ) result = self.fetch(tarball_url, signature_url) - result.addCallback(lambda x: self.verify(tarball_filename, - signature_filename)) + result.addCallback( + lambda x: self.verify(tarball_filename, signature_filename), + ) result.addCallback(lambda x: self.extract(tarball_filename)) result.addCallback(lambda x: self.tweak(current_code_name)) - result.addCallback(lambda x: self.upgrade( - target_code_name, operation_id, - allow_third_party=allow_third_party, debug=debug)) + result.addCallback( + lambda x: self.upgrade( + target_code_name, + operation_id, + allow_third_party=allow_third_party, + debug=debug, + ), + ) result.addCallback(lambda x: self.finish()) result.addErrback(self.abort, operation_id) return result @@ -115,9 +135,11 @@ if not os.path.exists(self._config.upgrade_tool_directory): os.mkdir(self._config.upgrade_tool_directory) - result = fetch_to_files([tarball_url, signature_url], - self._config.upgrade_tool_directory, - logger=logging.warning) + result = fetch_to_files( + [tarball_url, signature_url], + self._config.upgrade_tool_directory, + logger=logging.warning, + ) def log_success(ignored): logging.info("Successfully fetched upgrade-tool files") @@ -142,8 +164,10 @@ logging.info("Successfully verified upgrade-tool tarball") def log_failure(failure): - logging.warning("Invalid signature for upgrade-tool tarball: %s" - % str(failure.value)) + logging.warning( + "Invalid signature for upgrade-tool " + f"tarball: {str(failure.value)}", + ) return failure result.addCallback(log_success) @@ -171,8 +195,7 @@ # party environment variable, so this trick is needed to make it # possible to upgrade against testing client packages from the # Landscape PPA - mirrors_filename = os.path.join(upgrade_tool_directory, - "mirrors.cfg") + mirrors_filename = 
os.path.join(upgrade_tool_directory, "mirrors.cfg") fd = open(mirrors_filename, "a") fd.write(self.landscape_ppa_url + "\n") fd.close() @@ -190,19 +213,24 @@ for label, content in [("output", out), ("error", err)]: if content: - buf.write(u"=== Standard %s ===\n\n%s\n\n" % (label, content)) + buf.write(f"=== Standard {label} ===\n\n{content}\n\n") for basename in sorted(os.listdir(self.logs_directory)): if not basename.endswith(".log"): continue filename = os.path.join(self.logs_directory, basename) content = read_text_file(filename, -self.logs_limit) - buf.write(u"=== %s ===\n\n%s\n\n" % (basename, content)) + buf.write(f"=== {basename} ===\n\n{content}\n\n") return buf.getvalue() - def upgrade(self, code_name, operation_id, allow_third_party=False, - debug=False): + def upgrade( + self, + code_name, + operation_id, + allow_third_party=False, + debug=False, + ): """Run the upgrade-tool command and send a report of the results. @param code_name: The code-name of the release to upgrade to. @@ -213,6 +241,7 @@ # This bizarre (and apparently unused) import is a workaround for # LP: #1670291 -- see comments in that ticket for an explanation import twisted.internet.unix # noqa: F401 + upgrade_tool_directory = self._config.upgrade_tool_directory upgrade_tool_filename = os.path.join(upgrade_tool_directory, code_name) args = ["--frontend", "DistUpgradeViewNonInteractive"] @@ -222,8 +251,13 @@ if debug: env["DEBUG_UPDATE_MANAGER"] = "True" - result = spawn_process(upgrade_tool_filename, args=args, env=env, - path=upgrade_tool_directory, wait_pipes=False) + result = spawn_process( + upgrade_tool_filename, + args=args, + env=env, + path=upgrade_tool_directory, + wait_pipes=False, + ) def send_operation_result(args): out, err, code = args @@ -235,10 +269,16 @@ else: status = FAILED text = self.make_operation_result_text(out, err) - message = self.make_operation_result_message(operation_id, status, - text, code) - logging.info("Queuing message with release upgrade results to " - "exchange urgently.") + message = self.make_operation_result_message( + operation_id, + status, + text, + code, + ) + logging.info( + "Queuing message with release upgrade results to " + "exchange urgently.", + ) return self._send_message(message) result.addCallback(send_operation_result) @@ -261,19 +301,31 @@ args = ["--force-apt-update"] if self._config.config is not None: - args.append("--config=%s" % self._config.config) + args.append(f"--config={self._config.config}") - return spawn_process(reporter, args=args, uid=uid, gid=gid, - path=os.getcwd(), env=os.environ) + return spawn_process( + reporter, + args=args, + uid=uid, + gid=gid, + path=os.getcwd(), + env=os.environ, + ) def abort(self, failure, operation_id): """Abort the task reporting details about the failure.""" message = self.make_operation_result_message( - operation_id, FAILED, "%s" % str(failure.value), 1) - - logging.info("Queuing message with release upgrade failure to " - "exchange urgently.") + operation_id, + FAILED, + str(failure.value), + 1, + ) + + logging.info( + "Queuing message with release upgrade failure to " + "exchange urgently.", + ) return self._send_message(message) diff -Nru landscape-client-23.02/landscape/client/package/reporter.py landscape-client-23.08/landscape/client/package/reporter.py --- landscape-client-23.02/landscape/client/package/reporter.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/package/reporter.py 2023-08-17 21:19:32.000000000 +0000 @@ -12,11 +12,17 @@ import re from 
twisted.internet.defer import ( - Deferred, succeed, inlineCallbacks, returnValue) + Deferred, + succeed, + inlineCallbacks, + returnValue, +) from landscape.lib import bpickle from landscape.lib.apt.package.store import ( - UnknownHashIDRequest, FakePackageStore) + UnknownHashIDRequest, + FakePackageStore, +) from landscape.lib.config import get_bindir from landscape.lib.sequenceranges import sequence_to_ranges from landscape.lib.twisted_util import gather_results, spawn_process @@ -24,7 +30,10 @@ from landscape.lib.fs import touch_file, create_binary_file from landscape.lib.lsb_release import parse_lsb_release, LSB_RELEASE_FILENAME from landscape.client.package.taskhandler import ( - PackageTaskHandlerConfiguration, PackageTaskHandler, run_task_handler) + PackageTaskHandlerConfiguration, + PackageTaskHandler, + run_task_handler, +) HASH_ID_REQUEST_TIMEOUT = 7200 @@ -43,14 +52,23 @@ Specialize L{Configuration.make_parser}, adding options reporter-specific options. """ - parser = super(PackageReporterConfiguration, self).make_parser() - parser.add_option("--force-apt-update", default=False, - action="store_true", - help="Force running apt-update.") - parser.add_option("--http-proxy", metavar="URL", - help="The URL of the HTTP proxy, if one is needed.") - parser.add_option("--https-proxy", metavar="URL", - help="The URL of the HTTPS proxy, if one is needed.") + parser = super().make_parser() + parser.add_option( + "--force-apt-update", + default=False, + action="store_true", + help="Force running apt-update.", + ) + parser.add_option( + "--http-proxy", + metavar="URL", + help="The URL of the HTTP proxy, if one is needed.", + ) + parser.add_option( + "--https-proxy", + metavar="URL", + help="The URL of the HTTPS proxy, if one is needed.", + ) return parser @@ -59,6 +77,7 @@ @cvar queue_name: Name of the task queue to pick tasks from. 
""" + config_factory = PackageReporterConfiguration queue_name = "reporter" @@ -99,8 +118,7 @@ return result def send_message(self, message): - return self._broker.send_message( - message, self._session_id, True) + return self._broker.send_message(message, self._session_id, True) def fetch_hash_id_db(self): """ @@ -137,21 +155,24 @@ def fetch_ok(data): create_binary_file(hash_id_db_filename, data) - logging.info("Downloaded hash=>id database from %s" % url) + logging.info(f"Downloaded hash=>id database from {url}") def fetch_error(failure): exception = failure.value - logging.warning("Couldn't download hash=>id database: %s" % - str(exception)) + logging.warning( + f"Couldn't download hash=>id database: {str(exception)}", + ) if url.startswith("https"): proxy = self._config.get("https_proxy") else: proxy = self._config.get("http_proxy") - result = fetch_async(url, - cainfo=self._config.get("ssl_public_key"), - proxy=proxy) + result = fetch_async( + url, + cainfo=self._config.get("ssl_public_key"), + proxy=proxy, + ) result.addCallback(fetch_ok) result.addErrback(fetch_error) @@ -173,8 +194,10 @@ # If config.url is http://host:123/path/to/message-system # then we'll use http://host:123/path/to/hash-id-databases - base_url = urlparse.urljoin(self._config.url.rstrip("/"), - "hash-id-databases") + base_url = urlparse.urljoin( + self._config.url.rstrip("/"), + "hash-id-databases", + ) return base_url.rstrip("/") + "/" @@ -189,12 +212,16 @@ if os.path.exists(self.sources_list_directory): filenames.extend( - [os.path.join(self.sources_list_directory, filename) for - filename in os.listdir(self.sources_list_directory)]) + [ + os.path.join(self.sources_list_directory, filename) + for filename in os.listdir(self.sources_list_directory) + ], + ) for filename in filenames: - seconds_since_last_change = ( - time.time() - os.path.getmtime(filename)) + seconds_since_last_change = time.time() - os.path.getmtime( + filename, + ) if seconds_since_last_change < PackageMonitor.run_interval: return True @@ -238,22 +265,23 @@ with open(os.path.join(base, "status")) as fd: read = fd.read() - pattern = re.compile(r'^Uid\:(.*)$', - re.VERBOSE | re.MULTILINE) + pattern = re.compile(r"^Uid\:(.*)$", re.VERBOSE | re.MULTILINE) for pattern in pattern.finditer(read): uid = pattern.groups()[0].split("\t")[1] - except IOError: + except OSError: continue (executable, args) = (cmdline[0], cmdline[1:]) - if (executable.startswith(PYTHON_BIN) and - any(x.startswith(RELEASE_UPGRADER_PATTERN) - for x in args) and - uid == UID_ROOT): - logging.info("Found ubuntu-release-upgrader running (pid: %s)" - % (pid)) + if ( + executable.startswith(PYTHON_BIN) + and any(x.startswith(RELEASE_UPGRADER_PATTERN) for x in args) + and uid == UID_ROOT + ): + logging.info( + f"Found ubuntu-release-upgrader running (pid: {pid})", + ) return True return False @@ -265,20 +293,26 @@ @return: a deferred returning (out, err, code) """ - if (self._config.force_apt_update or - self._apt_sources_have_changed() or - self._apt_update_timeout_expired(self._config.apt_update_interval) - ) and \ - not self._is_release_upgrader_running(): + if ( + self._config.force_apt_update + or self._apt_sources_have_changed() + or self._apt_update_timeout_expired( + self._config.apt_update_interval, + ) + ) and not self._is_release_upgrader_running(): accepted_apt_errors = ( "Problem renaming the file /var/cache/apt/srcpkgcache.bin", - "Problem renaming the file /var/cache/apt/pkgcache.bin") + "Problem renaming the file /var/cache/apt/pkgcache.bin", + ) for retry in 
range(len(LOCK_RETRY_DELAYS)): deferred = Deferred() self._reactor.call_later( - LOCK_RETRY_DELAYS[retry], self._apt_update, deferred) + LOCK_RETRY_DELAYS[retry], + self._apt_update, + deferred, + ) out, err, code = yield deferred out = out.decode("utf-8") err = err.decode("utf-8") @@ -287,19 +321,23 @@ touch_file(self._config.update_stamp_filename) logging.debug( - "'%s' exited with status %d (out='%s', err='%s')" % ( - self.apt_update_filename, code, out, err)) + f"'{self.apt_update_filename}' exited with " + f"status {code:d} (out='{out}', err='{err}')", + ) if code != 0: if code == 100: if retry < len(LOCK_RETRY_DELAYS) - 1: logging.warning( - "Could not acquire the apt lock. Retrying in" - " %s seconds." % LOCK_RETRY_DELAYS[retry + 1]) + "Could not acquire the apt lock. Retrying in " + f"{LOCK_RETRY_DELAYS[retry + 1]} seconds.", + ) continue - logging.warning("'%s' exited with status %d (%s)" % ( - self.apt_update_filename, code, err)) + logging.warning( + f"'{self.apt_update_filename}' exited with " + f"status {code:d} ({err})", + ) # Errors caused by missing cache files are acceptable, # as they are not an issue for the lists update @@ -313,17 +351,24 @@ elif not self._facade.get_channels(): code = 1 - err = ("There are no APT sources configured in %s or %s." % - (self.sources_list_filename, - self.sources_list_directory)) + err = ( + "There are no APT sources configured " + f"in {self.sources_list_filename} " + f"or {self.sources_list_directory}." + ) yield self._broker.call_if_accepted( - "package-reporter-result", self.send_result, timestamp, - code, err) + "package-reporter-result", + self.send_result, + timestamp, + code, + err, + ) yield returnValue((out, err, code)) else: - logging.debug("'%s' didn't run, conditions not met" % - self.apt_update_filename) + logging.debug( + f"'{self.apt_update_filename}' didn't run, conditions not met", + ) yield returnValue(("", "", 0)) def _apt_update(self, deferred): @@ -351,7 +396,8 @@ "type": "package-reporter-result", "report-timestamp": timestamp, "code": code, - "err": err} + "err": err, + } return self.send_message(message) def handle_task(self, task): @@ -366,7 +412,7 @@ return self._handle_resynchronize() # Skip and continue. - logging.warning("Unknown task message type: {!r}".format(message_type)) + logging.warning(f"Unknown task message type: {message_type!r}") return succeed(None) def _handle_package_ids(self, message): @@ -388,8 +434,10 @@ self._store.set_hash_ids(hash_ids) - logging.info("Received %d package hash => id translations, %d hashes " - "are unknown." 
% (len(hash_ids), len(unknown_hashes))) + logging.info( + f"Received {len(hash_ids):d} package hash => " + f"id translations, {len(unknown_hashes):d} hashes are unknown.", + ) if unknown_hashes: result = self._handle_unknown_packages(unknown_hashes) @@ -424,37 +472,45 @@ if hash in hashes: added_hashes.append(hash) skeleton = self._facade.get_package_skeleton(package) - packages.append({"type": skeleton.type, - "name": skeleton.name, - "version": skeleton.version, - "section": skeleton.section, - "summary": skeleton.summary, - "description": skeleton.description, - "size": skeleton.size, - "installed-size": skeleton.installed_size, - "relations": skeleton.relations}) + packages.append( + { + "type": skeleton.type, + "name": skeleton.name, + "version": skeleton.version, + "section": skeleton.section, + "summary": skeleton.summary, + "description": skeleton.description, + "size": skeleton.size, + "installed-size": skeleton.installed_size, + "relations": skeleton.relations, + }, + ) if packages: - logging.info("Queuing messages with data for %d packages to " - "exchange urgently." % len(packages)) + logging.info( + f"Queuing messages with data for {len(packages):d} packages " + "to exchange urgently.", + ) message = {"type": "add-packages", "packages": packages} - result = self._send_message_with_hash_id_request(message, - added_hashes) + result = self._send_message_with_hash_id_request( + message, + added_hashes, + ) else: result = succeed(None) return result def _remove_hash_id_db(self): - def _remove_it(hash_id_db_filename): if hash_id_db_filename and os.path.exists(hash_id_db_filename): logging.warning( - "Removing cached hash=>id database %s", - hash_id_db_filename) + f"Removing cached hash=>id database {hash_id_db_filename}", + ) os.remove(hash_id_db_filename) + result = self._determine_hash_id_db_filename() result.addCallback(_remove_it) return result @@ -515,14 +571,20 @@ unknown_hashes = sorted(unknown_hashes) unknown_hashes = unknown_hashes[:MAX_UNKNOWN_HASHES_PER_REQUEST] - logging.info("Queuing request for package hash => id " - "translation on %d hash(es)." % len(unknown_hashes)) - - message = {"type": "unknown-package-hashes", - "hashes": unknown_hashes} - - result = self._send_message_with_hash_id_request(message, - unknown_hashes) + logging.info( + "Queuing request for package hash => id " + f"translation on {len(unknown_hashes):d} hash(es).", + ) + + message = { + "type": "unknown-package-hashes", + "hashes": unknown_hashes, + } + + result = self._send_message_with_hash_id_request( + message, + unknown_hashes, + ) return result @@ -584,7 +646,7 @@ status_file = apt_pkg.config.find_file("dir::state::status") lists_dir = apt_pkg.config.find_dir("dir::state::lists") files = [status_file, lists_dir] - files.extend(glob.glob("%s/*Packages" % lists_dir)) + files.extend(glob.glob(f"{lists_dir}/*Packages")) last_checked = os.stat(stamp_file).st_mtime for f in files: @@ -593,7 +655,7 @@ return True return False - def _compute_packages_changes(self): + def _compute_packages_changes(self): # noqa: max-complexity: 13 """Analyse changes in the universe of known packages. This method will verify if there are packages that: @@ -646,10 +708,13 @@ # ignore backports, so that packages don't get automatically # upgraded to the backports version. 
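# Annotation (not part of the patch): the comment block above describes the
# rule being reformatted in the next hunk -- a package version is ignored
# only when *all* of its origins are the official backports archive; if it
# is also available elsewhere (e.g. a PPA), it is assumed to have been
# added deliberately. A minimal sketch of that predicate, with a
# hypothetical Origin type and archive name:

from collections import namedtuple

Origin = namedtuple("Origin", ["archive"])


def only_in_backports(origins, backports_archive="jammy-backports"):
    backport_origins = [
        origin for origin in origins if origin.archive == backports_archive
    ]
    # True only when the version exists in backports and nowhere else.
    return bool(backport_origins) and len(backport_origins) == len(origins)


assert only_in_backports([Origin("jammy-backports")])
assert not only_in_backports([Origin("jammy-backports"), Origin("my-ppa")])
assert not only_in_backports([])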
backport_origins = [ - origin for origin in package.origins - if origin.archive == backports_archive] + origin + for origin in package.origins + if origin.archive == backports_archive + ] if backport_origins and ( - len(backport_origins) == len(package.origins)): + len(backport_origins) == len(package.origins) + ): # Ignore the version if it's only in the official # backports archive. If it's somewhere else as well, # e.g. a PPA, we assume it was added manually and the @@ -673,8 +738,10 @@ # Is this package present in the security pocket? security_origins = any( - origin for origin in package.origins - if origin.archive == security_archive) + origin + for origin in package.origins + if origin.archive == security_archive + ) if security_origins: current_security.add(id) @@ -700,44 +767,54 @@ message = {} if new_installed: - message["installed"] = \ - list(sequence_to_ranges(sorted(new_installed))) + message["installed"] = list( + sequence_to_ranges(sorted(new_installed)), + ) if new_available: - message["available"] = \ - list(sequence_to_ranges(sorted(new_available))) + message["available"] = list( + sequence_to_ranges(sorted(new_available)), + ) if new_upgrades: - message["available-upgrades"] = \ - list(sequence_to_ranges(sorted(new_upgrades))) + message["available-upgrades"] = list( + sequence_to_ranges(sorted(new_upgrades)), + ) if new_locked: - message["locked"] = \ - list(sequence_to_ranges(sorted(new_locked))) + message["locked"] = list(sequence_to_ranges(sorted(new_locked))) if new_autoremovable: message["autoremovable"] = list( - sequence_to_ranges(sorted(new_autoremovable))) + sequence_to_ranges(sorted(new_autoremovable)), + ) if not_autoremovable: message["not-autoremovable"] = list( - sequence_to_ranges(sorted(not_autoremovable))) + sequence_to_ranges(sorted(not_autoremovable)), + ) if new_security: message["security"] = list( - sequence_to_ranges(sorted(new_security))) + sequence_to_ranges(sorted(new_security)), + ) if not_security: message["not-security"] = list( - sequence_to_ranges(sorted(not_security))) + sequence_to_ranges(sorted(not_security)), + ) if not_installed: - message["not-installed"] = \ - list(sequence_to_ranges(sorted(not_installed))) + message["not-installed"] = list( + sequence_to_ranges(sorted(not_installed)), + ) if not_available: - message["not-available"] = \ - list(sequence_to_ranges(sorted(not_available))) + message["not-available"] = list( + sequence_to_ranges(sorted(not_available)), + ) if not_upgrades: - message["not-available-upgrades"] = \ - list(sequence_to_ranges(sorted(not_upgrades))) + message["not-available-upgrades"] = list( + sequence_to_ranges(sorted(not_upgrades)), + ) if not_locked: - message["not-locked"] = \ - list(sequence_to_ranges(sorted(not_locked))) + message["not-locked"] = list( + sequence_to_ranges(sorted(not_locked)), + ) if not message: return succeed(False) @@ -747,23 +824,19 @@ logging.info( "Queuing message with changes in known packages: " - "%(installed)d installed, %(available)d available, " - "%(upgrades)d available upgrades, %(locked)d locked, " - "%(auto)d autoremovable, %(security)d security, " - "%(not_installed)d not installed, " - "%(not_available)d not available, " - "%(not_upgrades)d not available upgrades, " - "%(not_locked)d not locked, " - "%(not_auto)d not autoremovable, " - "%(not_security)d not security.", - dict( - installed=len(new_installed), available=len(new_available), - upgrades=len(new_upgrades), locked=len(new_locked), - auto=len(new_autoremovable), not_installed=len(not_installed), - 
not_available=len(not_available), - not_upgrades=len(not_upgrades), not_locked=len(not_locked), - not_auto=len(not_autoremovable), security=len(new_security), - not_security=len(not_security))) + f"{len(new_installed):d} installed, " + f"{len(new_available):d} available, " + f"{len(new_upgrades):d} available upgrades, " + f"{len(new_locked):d} locked, " + f"{len(new_autoremovable):d} autoremovable, " + f"{len(new_security):d} security, " + f"{len(not_installed):d} not installed, " + f"{len(not_available):d} not available, " + f"{len(not_upgrades):d} not available upgrades, " + f"{len(not_locked):d} not locked, " + f"{len(not_autoremovable):d} not autoremovable, " + f"{len(not_security):d} not security.", + ) def update_currently_known(result): if new_installed: @@ -811,7 +884,7 @@ def send_message(self, message): self._store.save_message(message) - return super(FakeGlobalReporter, self).send_message(message) + return super().send_message(message) class FakeReporter(PackageReporter): @@ -861,7 +934,8 @@ got_type.add(message["type"]) sent.append(message_id) deferred.addCallback( - lambda x, message=message: self.send_message(message)) + lambda x, message=message: self.send_message(message), + ) self._store.save_message_ids(sent) return deferred diff -Nru landscape-client-23.02/landscape/client/package/taskhandler.py landscape-client-23.08/landscape/client/package/taskhandler.py --- landscape-client-23.02/landscape/client/package/taskhandler.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/package/taskhandler.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,16 +1,22 @@ +import logging import os import re -import logging -from twisted.internet.defer import succeed, Deferred, maybeDeferred +from twisted.internet.defer import Deferred +from twisted.internet.defer import maybeDeferred +from twisted.internet.defer import succeed -from landscape.lib.apt.package.store import PackageStore, InvalidHashIdDb -from landscape.lib.lock import lock_path, LockError -from landscape.lib.log import log_failure -from landscape.lib.lsb_release import LSB_RELEASE_FILENAME, parse_lsb_release -from landscape.client.reactor import LandscapeReactor -from landscape.client.deployment import Configuration, init_logging from landscape.client.broker.amp import RemoteBrokerConnector +from landscape.client.deployment import Configuration +from landscape.client.deployment import init_logging +from landscape.client.reactor import LandscapeReactor +from landscape.lib.apt.package.store import InvalidHashIdDb +from landscape.lib.apt.package.store import PackageStore +from landscape.lib.lock import lock_path +from landscape.lib.lock import LockError +from landscape.lib.log import log_failure +from landscape.lib.lsb_release import LSB_RELEASE_FILENAME +from landscape.lib.lsb_release import parse_lsb_release class PackageTaskError(Exception): @@ -47,7 +53,7 @@ return os.path.join(self.data_path, "detect_package_changes_timestamp") -class LazyRemoteBroker(object): +class LazyRemoteBroker: """Wrapper class around L{RemoteBroker} providing lazy initialization. This class is a wrapper around a regular L{RemoteBroker}. 
It connects to @@ -74,7 +80,6 @@ return getattr(self._remote, method) def wrapper(*args, **kwargs): - def got_connection(remote): self._remote = remote return getattr(self._remote, method)(*args, **kwargs) @@ -85,7 +90,7 @@ return wrapper -class PackageTaskHandler(object): +class PackageTaskHandler: config_factory = PackageTaskHandlerConfiguration @@ -97,8 +102,14 @@ # update-notifier-common package is installed. update_notifier_stamp = "/var/lib/apt/periodic/update-success-stamp" - def __init__(self, package_store, package_facade, remote_broker, config, - reactor): + def __init__( + self, + package_store, + package_facade, + remote_broker, + config, + reactor, + ): self._store = package_store self._facade = package_facade self._broker = remote_broker @@ -189,8 +200,9 @@ except InvalidHashIdDb: # The appropriate database is there but broken, # let's remove it and go on - logging.warning("Invalid hash=>id database %s" % - hash_id_db_filename) + logging.warning( + f"Invalid hash=>id database {hash_id_db_filename}", + ) os.remove(hash_id_db_filename) return @@ -215,14 +227,16 @@ try: lsb_release_info = parse_lsb_release(self.lsb_release_filename) - except IOError as error: + except OSError as error: logging.warning(warning % str(error)) return None try: codename = lsb_release_info["code-name"] except KeyError: - logging.warning(warning % "missing code-name key in %s" % - self.lsb_release_filename) + logging.warning( + warning + % f"missing code-name key in {self.lsb_release_filename}", + ) return None arch = self._facade.get_arch() @@ -233,15 +247,16 @@ logging.warning(warning % "unknown dpkg architecture") return None - return os.path.join(self._config.hash_id_directory, - "%s_%s_%s" % (server_uuid, codename, arch)) + return os.path.join( + self._config.hash_id_directory, + f"{server_uuid}_{codename}_{arch}", + ) result = self._broker.get_server_uuid() result.addCallback(got_server_uuid) return result def get_session_id(self): - def got_session_id(session_id): self._session_id = session_id return session_id @@ -274,15 +289,16 @@ os.mkdir(directory) program_name = cls.queue_name - lock_filename = os.path.join(config.package_directory, - program_name + ".lock") + lock_filename = os.path.join( + config.package_directory, + program_name + ".lock", + ) try: lock_path(lock_filename) except LockError: if config.quiet: raise SystemExit() - raise SystemExit("error: package %s is already running" - % program_name) + raise SystemExit(f"error: package {program_name} is already running") words = re.findall("[A-Z][a-z]+", cls.__name__) init_logging(config, "-".join(word.lower() for word in words)) @@ -295,6 +311,7 @@ # Delay importing of the facades so that we don't # import Apt unless we need to. 
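# Annotation (not part of the patch): run_task_handler() in the hunk above
# also guards each task queue with a lock file (<queue_name>.lock) so that
# only one changer/reporter instance runs per queue at a time. A minimal
# sketch of that singleton guard; the real client uses
# landscape.lib.lock.lock_path, so the fcntl-based helper below is an
# illustrative substitute, not the actual implementation:

import fcntl
import os


def acquire_singleton_lock(lock_filename, program_name):
    fd = os.open(lock_filename, os.O_CREAT | os.O_RDWR, 0o644)
    try:
        # Non-blocking: fail immediately if another instance holds the lock.
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError:
        os.close(fd)
        raise SystemExit(f"error: package {program_name} is already running")
    # Keep the descriptor open for the process lifetime; closing releases it.
    return fd


lock_fd = acquire_singleton_lock("/tmp/changer.lock", "changer")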
from landscape.lib.apt.package.facade import AptFacade + package_facade = AptFacade() def finish(): diff -Nru landscape-client-23.02/landscape/client/package/tests/test_changer.py landscape-client-23.08/landscape/client/package/tests/test_changer.py --- landscape-client-23.02/landscape/client/package/tests/test_changer.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/package/tests/test_changer.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,32 +1,45 @@ -# -*- encoding: utf-8 -*- -import time -import sys import os +import sys +import time +from unittest.mock import call +from unittest.mock import Mock +from unittest.mock import patch from twisted.internet.defer import Deferred +from twisted.internet.error import ProcessDone +from twisted.internet.error import ProcessTerminated from twisted.python.failure import Failure -from twisted.internet.error import ProcessTerminated, ProcessDone -from mock import patch, Mock, call - -from landscape.lib.apt.package.store import PackageStore -from landscape.lib.apt.package.facade import ( - DependencyError, TransactionError) -from landscape.lib.apt.package.testing import ( - HASH1, HASH2, HASH3, PKGDEB1, PKGDEB2, - AptFacadeHelper, SimpleRepositoryHelper) -from landscape.lib import base64 -from landscape.lib.fs import create_text_file, read_text_file, touch_file -from landscape.lib.testing import StubProcessFactory, FakeReactor -from landscape.client.package.changer import ( - PackageChanger, main, UNKNOWN_PACKAGE_DATA_TIMEOUT, - SUCCESS_RESULT, DEPENDENCY_ERROR_RESULT, POLICY_ALLOW_INSTALLS, - POLICY_ALLOW_ALL_CHANGES, ERROR_RESULT) -from landscape.client.package.changer import ( - PackageChangerConfiguration, ChangePackagesResult) -from landscape.client.tests.helpers import LandscapeTest, BrokerServiceHelper from landscape.client.manager.manager import FAILED from landscape.client.manager.shutdownmanager import ShutdownFailedError +from landscape.client.package.changer import ChangePackagesResult +from landscape.client.package.changer import DEPENDENCY_ERROR_RESULT +from landscape.client.package.changer import ERROR_RESULT +from landscape.client.package.changer import main +from landscape.client.package.changer import PackageChanger +from landscape.client.package.changer import PackageChangerConfiguration +from landscape.client.package.changer import POLICY_ALLOW_ALL_CHANGES +from landscape.client.package.changer import POLICY_ALLOW_INSTALLS +from landscape.client.package.changer import SUCCESS_RESULT +from landscape.client.package.changer import UNKNOWN_PACKAGE_DATA_TIMEOUT +from landscape.client.tests.helpers import BrokerServiceHelper +from landscape.client.tests.helpers import LandscapeTest +from landscape.lib import base64 +from landscape.lib.apt.package.facade import DependencyError +from landscape.lib.apt.package.facade import TransactionError +from landscape.lib.apt.package.store import PackageStore +from landscape.lib.apt.package.testing import AptFacadeHelper +from landscape.lib.apt.package.testing import HASH1 +from landscape.lib.apt.package.testing import HASH2 +from landscape.lib.apt.package.testing import HASH3 +from landscape.lib.apt.package.testing import PKGDEB1 +from landscape.lib.apt.package.testing import PKGDEB2 +from landscape.lib.apt.package.testing import SimpleRepositoryHelper +from landscape.lib.fs import create_text_file +from landscape.lib.fs import read_text_file +from landscape.lib.fs import touch_file +from landscape.lib.testing import FakeReactor +from landscape.lib.testing import 
StubProcessFactory class AptPackageChangerTest(LandscapeTest): @@ -34,7 +47,7 @@ helpers = [AptFacadeHelper, SimpleRepositoryHelper, BrokerServiceHelper] def setUp(self): - super(AptPackageChangerTest, self).setUp() + super().setUp() self.store = PackageStore(self.makeFile()) self.config = PackageChangerConfiguration() self.config.data_path = self.makeDir() @@ -45,15 +58,20 @@ os.mkdir(self.config.binaries_path) touch_file(self.config.update_stamp_filename) self.changer = PackageChanger( - self.store, self.facade, self.remote, self.config, + self.store, + self.facade, + self.remote, + self.config, process_factory=self.process_factory, landscape_reactor=self.landscape_reactor, - reboot_required_filename=reboot_required_filename) + reboot_required_filename=reboot_required_filename, + ) self.changer.update_notifier_stamp = "/Not/Existing" self.changer.get_session_id() service = self.broker_service - service.message_store.set_accepted_types(["change-packages-result", - "operation-result"]) + service.message_store.set_accepted_types( + ["change-packages-result", "operation-result"], + ) def set_pkg1_installed(self): """Return the hash of a package that is installed.""" @@ -75,14 +93,18 @@ Return the hashes of the two packages. """ self._add_package_to_deb_dir( - self.repository_dir, "foo", control_fields={"Depends": "bar"}) + self.repository_dir, + "foo", + control_fields={"Depends": "bar"}, + ) self._add_package_to_deb_dir(self.repository_dir, "bar") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") return ( self.facade.get_package_hash(foo), - self.facade.get_package_hash(bar)) + self.facade.get_package_hash(bar), + ) def set_pkg2_upgrades_pkg1(self): """Make it so that one package upgrades another. @@ -95,23 +117,32 @@ foo_1, foo_2 = sorted(self.facade.get_packages_by_name("foo")) return ( self.facade.get_package_hash(foo_1), - self.facade.get_package_hash(foo_2)) + self.facade.get_package_hash(foo_2), + ) def remove_pkg2(self): """Remove package name2 from its repository.""" packages_file = os.path.join(self.repository_dir, "Packages") packages_contents = read_text_file(packages_file) packages_contents = "\n\n".join( - [stanza for stanza in packages_contents.split("\n\n") - if "Package: name2" not in stanza]) + [ + stanza + for stanza in packages_contents.split("\n\n") + if "Package: name2" not in stanza + ], + ) create_text_file(packages_file, packages_contents) def get_binaries_channels(self, binaries_path): """Return the channels that will be used for the binaries.""" - return [{"baseurl": "file://%s" % binaries_path, - "components": "", - "distribution": "./", - "type": "deb"}] + return [ + { + "baseurl": f"file://{binaries_path}", + "components": "", + "distribution": "./", + "type": "deb", + }, + ] def get_package_name(self, version): """Return the name of the package.""" @@ -132,8 +163,8 @@ def replace_perform_changes(self, func): old_perform_changes = self.Facade.perform_changes - def reset_perform_changes(Facade): - Facade.perform_changes = old_perform_changes + def reset_perform_changes(facade): + facade.perform_changes = old_perform_changes self.addCleanup(reset_perform_changes, self.Facade) self.Facade.perform_changes = func @@ -144,9 +175,10 @@ # Let's request an operation that would require an answer with a # must-install field with a package for which the id isn't yet # known by the client. 
- self.store.add_task("changer", - {"type": "change-packages", "install": [1], - "operation-id": 123}) + self.store.add_task( + "changer", + {"type": "change-packages", "install": [1], "operation-id": 123}, + ) # In our first try, we should get nothing, because the id of the # dependency (hash2) isn't known. @@ -155,8 +187,10 @@ self.assertEqual(result.called, True) self.assertMessages(self.get_pending_messages(), []) - self.assertIn("Package data not yet synchronized with server (%r)" - % hash2, self.logfile.getvalue()) + self.assertIn( + f"Package data not yet synchronized with server ({hash2!r})", + self.logfile.getvalue(), + ) # So now we'll set it, and advance the reactor to the scheduled # change detection. We'll get a lot of messages, including the @@ -165,57 +199,76 @@ result = self.changer.handle_tasks() def got_result(result): - self.assertMessages(self.get_pending_messages(), - [{"must-install": [2], - "operation-id": 123, - "result-code": 101, - "type": "change-packages-result"}]) + self.assertMessages( + self.get_pending_messages(), + [ + { + "must-install": [2], + "operation-id": 123, + "result-code": 101, + "type": "change-packages-result", + }, + ], + ) + return result.addCallback(got_result) def test_install_unknown_id(self): - self.store.add_task("changer", - {"type": "change-packages", "install": [456], - "operation-id": 123}) + self.store.add_task( + "changer", + {"type": "change-packages", "install": [456], "operation-id": 123}, + ) self.changer.handle_tasks() - self.assertIn("Package data not yet synchronized with server (456)", - self.logfile.getvalue()) + self.assertIn( + "Package data not yet synchronized with server (456)", + self.logfile.getvalue(), + ) self.assertTrue(self.store.get_next_task("changer")) def test_remove_unknown_id(self): - self.store.add_task("changer", - {"type": "change-packages", "remove": [456], - "operation-id": 123}) + self.store.add_task( + "changer", + {"type": "change-packages", "remove": [456], "operation-id": 123}, + ) self.changer.handle_tasks() - self.assertIn("Package data not yet synchronized with server (456)", - self.logfile.getvalue()) + self.assertIn( + "Package data not yet synchronized with server (456)", + self.logfile.getvalue(), + ) self.assertTrue(self.store.get_next_task("changer")) def test_install_unknown_package(self): self.store.set_hash_ids({b"hash": 456}) - self.store.add_task("changer", - {"type": "change-packages", "install": [456], - "operation-id": 123}) + self.store.add_task( + "changer", + {"type": "change-packages", "install": [456], "operation-id": 123}, + ) self.changer.handle_tasks() - self.assertIn("Package data not yet synchronized with server (%r)" % - b"hash", self.logfile.getvalue()) + self.assertIn( + "Package data not yet synchronized with server (b'hash')", + self.logfile.getvalue(), + ) self.assertTrue(self.store.get_next_task("changer")) def test_remove_unknown_package(self): self.store.set_hash_ids({b"hash": 456}) - self.store.add_task("changer", - {"type": "change-packages", "remove": [456], - "operation-id": 123}) + self.store.add_task( + "changer", + {"type": "change-packages", "remove": [456], "operation-id": 123}, + ) self.changer.handle_tasks() - self.assertIn("Package data not yet synchronized with server (%r)" % - b"hash", self.logfile.getvalue()) + self.assertIn( + "Package data not yet synchronized with server (b'hash')", + self.logfile.getvalue(), + ) self.assertTrue(self.store.get_next_task("changer")) def test_unknown_data_timeout(self): @@ -223,9 +276,10 @@ In these cases a 
warning is logged, and the task is removed. """ - self.store.add_task("changer", - {"type": "change-packages", "remove": [123], - "operation-id": 123}) + self.store.add_task( + "changer", + {"type": "change-packages", "remove": [123], "operation-id": 123}, + ) the_time = time.time() + UNKNOWN_PACKAGE_DATA_TIMEOUT with patch("time.time", return_value=the_time) as time_mock: @@ -233,17 +287,22 @@ time_mock.assert_called_with() - self.assertIn("Package data not yet synchronized with server (123)", - self.logfile.getvalue()) + self.assertIn( + "Package data not yet synchronized with server (123)", + self.logfile.getvalue(), + ) def got_result(result): - message = {"type": "change-packages-result", - "operation-id": 123, - "result-code": 100, - "result-text": "Package data has changed. " - "Please retry the operation."} + message = { + "type": "change-packages-result", + "operation-id": 123, + "result-code": 100, + "result-text": "Package data has changed. " + "Please retry the operation.", + } self.assertMessages(self.get_pending_messages(), [message]) self.assertEqual(self.store.get_next_task("changer"), None) + return result.addCallback(got_result) def test_dependency_error(self): @@ -268,13 +327,15 @@ # loaded. self.facade.ensure_channels_reloaded() self.store.set_hash_ids({installed_hash: 1, HASH2: 2, HASH3: 3}) - self.store.add_task("changer", - {"type": "change-packages", "install": [2], - "operation-id": 123}) + self.store.add_task( + "changer", + {"type": "change-packages", "install": [2], "operation-id": 123}, + ) packages = [ self.facade.get_package_by_hash(pkg_hash) - for pkg_hash in [installed_hash, HASH2, HASH3]] + for pkg_hash in [installed_hash, HASH2, HASH3] + ] def raise_dependency_error(self): raise DependencyError(set(packages)) @@ -284,12 +345,19 @@ result = self.changer.handle_tasks() def got_result(result): - self.assertMessages(self.get_pending_messages(), - [{"must-install": [2, 3], - "must-remove": [1], - "operation-id": 123, - "result-code": 101, - "type": "change-packages-result"}]) + self.assertMessages( + self.get_pending_messages(), + [ + { + "must-install": [2, 3], + "must-remove": [1], + "operation-id": 123, + "result-code": 101, + "type": "change-packages-result", + }, + ], + ) + return result.addCallback(got_result) def test_dependency_error_with_binaries(self): @@ -301,18 +369,23 @@ self.remove_pkg2() installed_hash = self.set_pkg1_installed() self.store.set_hash_ids({installed_hash: 1, HASH3: 3}) - self.store.add_task("changer", - {"type": "change-packages", - "install": [2], - "binaries": [(HASH2, 2, PKGDEB2)], - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "install": [2], + "binaries": [(HASH2, 2, PKGDEB2)], + "operation-id": 123, + }, + ) packages = set() def raise_dependency_error(self): packages.update( self.get_package_by_hash(pkg_hash) - for pkg_hash in [installed_hash, HASH2, HASH3]) + for pkg_hash in [installed_hash, HASH2, HASH3] + ) raise DependencyError(set(packages)) self.replace_perform_changes(raise_dependency_error) @@ -321,12 +394,19 @@ result = self.changer.handle_tasks() def got_result(result): - self.assertMessages(self.get_pending_messages(), - [{"must-install": [2, 3], - "must-remove": [1], - "operation-id": 123, - "result-code": 101, - "type": "change-packages-result"}]) + self.assertMessages( + self.get_pending_messages(), + [ + { + "must-install": [2, 3], + "must-remove": [1], + "operation-id": 123, + "result-code": 101, + "type": "change-packages-result", + }, + ], + ) + return 
result.addCallback(got_result) def test_perform_changes_with_allow_install_policy(self): @@ -339,7 +419,8 @@ [package1] = self.facade.get_packages_by_name("name1") self.facade.perform_changes = Mock( - side_effect=[DependencyError([package1]), "success"]) + side_effect=[DependencyError([package1]), "success"], + ) self.facade.mark_install = Mock() @@ -366,7 +447,8 @@ [package2] = self.facade.get_packages_by_name("name2") self.facade.perform_changes = Mock( - side_effect=DependencyError([package1, package2])) + side_effect=DependencyError([package1, package2]), + ) result = self.changer.change_packages(POLICY_ALLOW_INSTALLS) @@ -390,8 +472,11 @@ [package2] = self.facade.get_packages_by_name("name2") self.facade.perform_changes = Mock( - side_effect=[DependencyError([package1]), - DependencyError([package2])]) + side_effect=[ + DependencyError([package1]), + DependencyError([package2]), + ], + ) result = self.changer.change_packages(POLICY_ALLOW_INSTALLS) @@ -408,11 +493,15 @@ field that will be passed to the C{perform_changes} method. """ self.store.set_hash_ids({HASH1: 1}) - self.store.add_task("changer", - {"type": "change-packages", - "install": [1], - "policy": POLICY_ALLOW_INSTALLS, - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "install": [1], + "policy": POLICY_ALLOW_INSTALLS, + "operation-id": 123, + }, + ) result = ChangePackagesResult() result.code = SUCCESS_RESULT @@ -423,7 +512,8 @@ self.successResultOf(self.changer.handle_tasks()) self.changer.change_packages.assert_called_once_with( - POLICY_ALLOW_INSTALLS) + POLICY_ALLOW_INSTALLS, + ) def test_perform_changes_with_policy_allow_all_changes(self): """ @@ -438,8 +528,8 @@ [package2] = self.facade.get_packages_by_name("name2") self.facade.perform_changes = Mock( - side_effect=[DependencyError([package1, package2]), - "success"]) + side_effect=[DependencyError([package1, package2]), "success"], + ) self.facade.mark_install = Mock() self.facade.mark_remove = Mock() @@ -462,9 +552,10 @@ the server know about the unsolvable problem. 
""" self.store.set_hash_ids({HASH1: 1}) - self.store.add_task("changer", - {"type": "change-packages", "install": [1], - "operation-id": 123}) + self.store.add_task( + "changer", + {"type": "change-packages", "install": [1], "operation-id": 123}, + ) self.disable_clear_channels() result = self.changer.handle_tasks() @@ -476,8 +567,11 @@ self.assertEqual(message["operation-id"], 123) self.assertEqual(message["result-code"], 100) self.assertIn( - "packages have unmet dependencies", message["result-text"]) + "packages have unmet dependencies", + message["result-text"], + ) self.assertEqual(message["type"], "change-packages-result") + return result.addCallback(got_result) def test_tasks_are_isolated_marks(self): @@ -497,12 +591,18 @@ installed_hash = self.set_pkg1_installed() self.store.set_hash_ids({installed_hash: 1, installable_hash: 2}) - self.store.add_task("changer", - {"type": "change-packages", "install": [2], - "operation-id": 123}) - self.store.add_task("changer", - {"type": "change-packages", "upgrade-all": True, - "operation-id": 124}) + self.store.add_task( + "changer", + {"type": "change-packages", "install": [2], "operation-id": 123}, + ) + self.store.add_task( + "changer", + { + "type": "change-packages", + "upgrade-all": True, + "operation-id": 124, + }, + ) result = self.changer.handle_tasks() @@ -530,12 +630,14 @@ installed_hash = self.set_pkg1_installed() self.store.set_hash_ids({installed_hash: 1, installable_hash: 2}) - self.store.add_task("changer", - {"type": "change-packages", "install": [2], - "operation-id": 123}) - self.store.add_task("changer", - {"type": "change-packages", "remove": [1], - "operation-id": 124}) + self.store.add_task( + "changer", + {"type": "change-packages", "install": [2], "operation-id": 123}, + ) + self.store.add_task( + "changer", + {"type": "change-packages", "remove": [1], "operation-id": 124}, + ) result = self.changer.handle_tasks() @@ -558,23 +660,32 @@ """ installed_hash = self.set_pkg1_installed() self.store.set_hash_ids({installed_hash: 1, HASH2: 2, HASH3: 3}) - self.store.add_task("changer", - {"type": "change-packages", "install": [2], - "operation-id": 123}) + self.store.add_task( + "changer", + {"type": "change-packages", "install": [2], "operation-id": 123}, + ) def return_good_result(self): return "Yeah, I did whatever you've asked for!" + self.replace_perform_changes(return_good_result) result = self.changer.handle_tasks() def got_result(result): - self.assertMessages(self.get_pending_messages(), - [{"operation-id": 123, - "result-code": 1, - "result-text": "Yeah, I did whatever you've " - "asked for!", - "type": "change-packages-result"}]) + self.assertMessages( + self.get_pending_messages(), + [ + { + "operation-id": 123, + "result-code": 1, + "result-text": "Yeah, I did whatever you've " + "asked for!", + "type": "change-packages-result", + }, + ], + ) + return result.addCallback(got_result) def test_successful_operation_with_binaries(self): @@ -583,25 +694,38 @@ packages. """ self.store.set_hash_ids({HASH3: 3}) - self.store.add_task("changer", - {"type": "change-packages", "install": [2, 3], - "binaries": [(HASH2, 2, PKGDEB2)], - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "install": [2, 3], + "binaries": [(HASH2, 2, PKGDEB2)], + "operation-id": 123, + }, + ) def return_good_result(self): return "Yeah, I did whatever you've asked for!" 
+ self.replace_perform_changes(return_good_result) self.disable_clear_channels() result = self.changer.handle_tasks() def got_result(result): - self.assertMessages(self.get_pending_messages(), - [{"operation-id": 123, - "result-code": 1, - "result-text": "Yeah, I did whatever you've " - "asked for!", - "type": "change-packages-result"}]) + self.assertMessages( + self.get_pending_messages(), + [ + { + "operation-id": 123, + "result-code": 1, + "result-text": "Yeah, I did whatever you've " + "asked for!", + "type": "change-packages-result", + }, + ], + ) + return result.addCallback(got_result) def test_global_upgrade(self): @@ -613,38 +737,59 @@ hash1, hash2 = self.set_pkg2_upgrades_pkg1() self.store.set_hash_ids({hash1: 1, hash2: 2}) - self.store.add_task("changer", - {"type": "change-packages", "upgrade-all": True, - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "upgrade-all": True, + "operation-id": 123, + }, + ) result = self.changer.handle_tasks() def got_result(result): - self.assertMessages(self.get_pending_messages(), - [{"operation-id": 123, - "must-install": [2], - "must-remove": [1], - "result-code": 101, - "type": "change-packages-result"}]) + self.assertMessages( + self.get_pending_messages(), + [ + { + "operation-id": 123, + "must-install": [2], + "must-remove": [1], + "result-code": 101, + "type": "change-packages-result", + }, + ], + ) return result.addCallback(got_result) def test_global_upgrade_with_nothing_to_do(self): - self.store.add_task("changer", - {"type": "change-packages", "upgrade-all": True, - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "upgrade-all": True, + "operation-id": 123, + }, + ) result = self.changer.handle_tasks() def got_result(result): - self.assertMessages(self.get_pending_messages(), - [{"operation-id": 123, - "result-code": 1, - "result-text": - u"No changes required; all changes " - u"already performed", - "type": "change-packages-result"}]) + self.assertMessages( + self.get_pending_messages(), + [ + { + "operation-id": 123, + "result-code": 1, + "result-text": "No changes required; all changes " + "already performed", + "type": "change-packages-result", + }, + ], + ) return result.addCallback(got_result) @@ -656,8 +801,10 @@ os.remove(self.config.update_stamp_filename) def assert_log(ignored): - self.assertIn("The package-reporter hasn't run yet, exiting.", - self.logfile.getvalue()) + self.assertIn( + "The package-reporter hasn't run yet, exiting.", + self.logfile.getvalue(), + ) result = self.changer.run() return result.addCallback(assert_log) @@ -668,13 +815,16 @@ # Add a task that will do nothing besides producing an # answer. The reporter is only spawned if at least one # task was handled. - self.store.add_task("changer", {"type": "change-packages", - "operation-id": 123}) + self.store.add_task( + "changer", + {"type": "change-packages", "operation-id": 123}, + ) self.successResultOf(self.changer.run()) system_mock.assert_called_once_with( - "/fake/bin/landscape-package-reporter") + "/fake/bin/landscape-package-reporter", + ) @patch("os.system") def test_spawn_reporter_after_running_with_config(self, system_mock): @@ -685,19 +835,27 @@ # Add a task that will do nothing besides producing an # answer. The reporter is only spawned if at least one # task was handled. 
- self.store.add_task("changer", {"type": "change-packages", - "operation-id": 123}) + self.store.add_task( + "changer", + {"type": "change-packages", "operation-id": 123}, + ) self.successResultOf(self.changer.run()) system_mock.assert_called_once_with( - "/fake/bin/landscape-package-reporter -c test.conf") + "/fake/bin/landscape-package-reporter -c test.conf", + ) @patch("os.getuid", return_value=0) @patch("os.setgid") @patch("os.setuid") @patch("os.system") def test_set_effective_uid_and_gid_when_running_as_root( - self, system_mock, setuid_mock, setgid_mock, getuid_mock): + self, + system_mock, + setuid_mock, + setgid_mock, + getuid_mock, + ): """ After the package changer has run, we want the package-reporter to run to report the recent changes. If we're running as root, we want to @@ -705,10 +863,10 @@ """ self.config.bindir = "/fake/bin" - class FakeGroup(object): + class FakeGroup: gr_gid = 199 - class FakeUser(object): + class FakeUser: pw_uid = 199 # We are running as root @@ -717,8 +875,10 @@ # Add a task that will do nothing besides producing an # answer. The reporter is only spawned if at least # one task was handled. - self.store.add_task("changer", {"type": "change-packages", - "operation-id": 123}) + self.store.add_task( + "changer", + {"type": "change-packages", "operation-id": 123}, + ) self.successResultOf(self.changer.run()) grnam_mock.assert_called_once_with("landscape") @@ -726,7 +886,8 @@ pwnam_mock.assert_called_once_with("landscape") setuid_mock.assert_called_once_with(199) system_mock.assert_called_once_with( - "/fake/bin/landscape-package-reporter") + "/fake/bin/landscape-package-reporter", + ) def test_run(self): changer_mock = patch.object(self, "changer") @@ -746,7 +907,9 @@ @patch("os.system") def test_dont_spawn_reporter_after_running_if_nothing_done( - self, system_mock): + self, + system_mock, + ): self.successResultOf(self.changer.run()) system_mock.assert_not_called() @@ -754,8 +917,10 @@ pid = os.getpid() + 1 with patch("os.getpgrp", return_value=pid) as getpgrp_mock: with patch("os.setsid") as setsid_mock: - with patch("landscape.client.package.changer.run_task_handler", - return_value="RESULT") as run_task_handler_mock: + with patch( + "landscape.client.package.changer.run_task_handler", + return_value="RESULT", + ) as run_task_handler_mock: self.assertEqual(main(["ARGS"]), "RESULT") getpgrp_mock.assert_called_once_with() @@ -786,30 +951,40 @@ def test_find_command_default(self): expected = os.path.join( os.path.dirname(os.path.abspath(sys.argv[0])), - "landscape-package-changer") + "landscape-package-changer", + ) command = PackageChanger.find_command() self.assertEqual(expected, command) def test_transaction_error_with_unicode_data(self): self.store.set_hash_ids({HASH1: 1}) - self.store.add_task("changer", - {"type": "change-packages", "install": [1], - "operation-id": 123}) + self.store.add_task( + "changer", + {"type": "change-packages", "install": [1], "operation-id": 123}, + ) def raise_error(self): - raise TransactionError(u"áéíóú") + raise TransactionError("áéíóú") + self.replace_perform_changes(raise_error) self.disable_clear_channels() result = self.changer.handle_tasks() def got_result(result): - self.assertMessages(self.get_pending_messages(), - [{"operation-id": 123, - "result-code": 100, - "result-text": u"áéíóú", - "type": "change-packages-result"}]) + self.assertMessages( + self.get_pending_messages(), + [ + { + "operation-id": 123, + "result-code": 100, + "result-text": "áéíóú", + "type": "change-packages-result", + }, + ], + ) + 
return result.addCallback(got_result) def test_update_stamp_exists(self): @@ -835,7 +1010,8 @@ def test_binaries_path(self): self.assertEqual( self.config.binaries_path, - os.path.join(self.config.data_path, "package", "binaries")) + os.path.join(self.config.data_path, "package", "binaries"), + ) def test_init_channels(self): """ @@ -848,19 +1024,26 @@ self.changer.init_channels(binaries) binaries_path = self.config.binaries_path - self.assertFileContent(os.path.join(binaries_path, "111.deb"), - base64.decodebytes(PKGDEB1)) - self.assertFileContent(os.path.join(binaries_path, "222.deb"), - base64.decodebytes(PKGDEB2)) + self.assertFileContent( + os.path.join(binaries_path, "111.deb"), + base64.decodebytes(PKGDEB1), + ) + self.assertFileContent( + os.path.join(binaries_path, "222.deb"), + base64.decodebytes(PKGDEB2), + ) self.assertEqual( self.facade.get_channels(), - self.get_binaries_channels(binaries_path)) + self.get_binaries_channels(binaries_path), + ) self.assertEqual(self.store.get_hash_ids(), {HASH1: 111, HASH2: 222}) self.facade.ensure_channels_reloaded() - [pkg1, pkg2] = sorted(self.facade.get_packages(), - key=self.get_package_name) + [pkg1, pkg2] = sorted( + self.facade.get_packages(), + key=self.get_package_name, + ) self.assertEqual(self.facade.get_package_hash(pkg1), HASH1) self.assertEqual(self.facade.get_package_hash(pkg2), HASH2) @@ -892,13 +1075,19 @@ # like it is by default. self.facade.refetch_package_index = False self.assertEqual(None, self.facade.get_package_by_hash(HASH2)) - self.store.add_task("changer", - {"type": "change-packages", "install": [2], - "binaries": [(HASH2, 2, PKGDEB2)], - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "install": [2], + "binaries": [(HASH2, 2, PKGDEB2)], + "operation-id": 123, + }, + ) def return_good_result(self): return "Yeah, I did whatever you've asked for!" 
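# --- Illustrative sketch, not part of the patch: test_init_channels above
# --- expects server-sent binaries to land in binaries_path as <id>.deb files
# --- (base64-decoded) with their hash->id mapping recorded. A minimal
# --- stand-in under those assumptions (helper name hypothetical):
import base64
import os

def stage_binaries(binaries, binaries_path):
    """Write (hash, id, base64-deb) tuples to disk; return the hash->id map."""
    hash_ids = {}
    for pkg_hash, pkg_id, deb_data in binaries:
        with open(os.path.join(binaries_path, f"{pkg_id}.deb"), "wb") as fd:
            fd.write(base64.decodebytes(deb_data))
        hash_ids[pkg_hash] = pkg_id
    return hash_ids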
+ self.replace_perform_changes(return_good_result) result = self.changer.handle_tasks() @@ -929,22 +1118,35 @@ old_mtime = time.time() - 10 os.utime(self.facade._dpkg_status, (old_mtime, old_mtime)) self.facade.reload_channels() - self.store.add_task("changer", {"type": "change-packages", - "hold": [foo.package.id], - "remove-hold": [baz.package.id], - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "hold": [foo.package.id], + "remove-hold": [baz.package.id], + "operation-id": 123, + }, + ) def assert_result(result): self.facade.reload_channels() self.assertEqual(["foo"], self.facade.get_package_holds()) - self.assertIn("Queuing response with change package results " - "to exchange urgently.", self.logfile.getvalue()) + self.assertIn( + "Queuing response with change package results " + "to exchange urgently.", + self.logfile.getvalue(), + ) self.assertMessages( self.get_pending_messages(), - [{"type": "change-packages-result", - "operation-id": 123, - "result-text": "Package holds successfully changed.", - "result-code": 1}]) + [ + { + "type": "change-packages-result", + "operation-id": 123, + "result-text": "Package holds successfully changed.", + "result-code": 1, + }, + ], + ) result = self.changer.handle_tasks() return result.addCallback(assert_result) @@ -963,10 +1165,14 @@ [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") self.facade.reload_channels() - self.store.add_task("changer", {"type": "change-packages", - "hold": [foo.package.id, - bar.package.id], - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "hold": [foo.package.id, bar.package.id], + "operation-id": 123, + }, + ) def assert_result(result): self.facade.reload_channels() @@ -991,10 +1197,14 @@ self.facade.set_package_hold(foo) self.facade.set_package_hold(bar) self.facade.reload_channels() - self.store.add_task("changer", {"type": "change-packages", - "remove-hold": [foo.package.id, - bar.package.id], - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "remove-hold": [foo.package.id, bar.package.id], + "operation-id": 123, + }, + ) def assert_result(result): self.facade.reload_channels() @@ -1015,21 +1225,34 @@ [foo] = self.facade.get_packages_by_name("foo") self.facade.set_package_hold(foo) self.facade.reload_channels() - self.store.add_task("changer", {"type": "change-packages", - "hold": [foo.package.id], - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "hold": [foo.package.id], + "operation-id": 123, + }, + ) def assert_result(result): self.facade.reload_channels() self.assertEqual(["foo"], self.facade.get_package_holds()) - self.assertIn("Queuing response with change package results " - "to exchange urgently.", self.logfile.getvalue()) + self.assertIn( + "Queuing response with change package results " + "to exchange urgently.", + self.logfile.getvalue(), + ) self.assertMessages( self.get_pending_messages(), - [{"type": "change-packages-result", - "operation-id": 123, - "result-text": "Package holds successfully changed.", - "result-code": 1}]) + [ + { + "type": "change-packages-result", + "operation-id": 123, + "result-text": "Package holds successfully changed.", + "result-code": 1, + }, + ], + ) result = self.changer.handle_tasks() return result.addCallback(assert_result) @@ -1044,35 +1267,46 @@ requests won't get processed. 
""" self._add_system_package("foo", version="1.0") - self._add_package_to_deb_dir( - self.repository_dir, "foo", version="2.0") + self._add_package_to_deb_dir(self.repository_dir, "foo", version="2.0") self._add_system_package("bar", version="1.0") - self._add_package_to_deb_dir( - self.repository_dir, "bar", version="2.0") + self._add_package_to_deb_dir(self.repository_dir, "bar", version="2.0") self.facade.reload_channels() [foo1, foo2] = sorted(self.facade.get_packages_by_name("foo")) [bar1, bar2] = sorted(self.facade.get_packages_by_name("bar")) - self.store.set_hash_ids({self.facade.get_package_hash(foo1): 1, - self.facade.get_package_hash(foo2): 2, - self.facade.get_package_hash(bar1): 3, - self.facade.get_package_hash(bar2): 4}) - self.facade.reload_channels() - self.store.add_task("changer", {"type": "change-packages", - "hold": [2, 3], - "operation-id": 123}) + self.store.set_hash_ids( + { + self.facade.get_package_hash(foo1): 1, + self.facade.get_package_hash(foo2): 2, + self.facade.get_package_hash(bar1): 3, + self.facade.get_package_hash(bar2): 4, + }, + ) + self.facade.reload_channels() + self.store.add_task( + "changer", + {"type": "change-packages", "hold": [2, 3], "operation-id": 123}, + ) def assert_result(result): self.facade.reload_channels() self.assertEqual([], self.facade.get_package_holds()) - self.assertIn("Queuing response with change package results " - "to exchange urgently.", self.logfile.getvalue()) + self.assertIn( + "Queuing response with change package results " + "to exchange urgently.", + self.logfile.getvalue(), + ) self.assertMessages( self.get_pending_messages(), - [{"type": "change-packages-result", - "operation-id": 123, - "result-text": "Cannot perform the changes, since the" + - " following packages are not installed: foo", - "result-code": 100}]) + [ + { + "type": "change-packages-result", + "operation-id": 123, + "result-text": "Cannot perform the changes, since the" + + " following packages are not installed: foo", + "result-code": 100, + }, + ], + ) result = self.changer.handle_tasks() return result.addCallback(assert_result) @@ -1096,26 +1330,37 @@ [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") [baz] = self.facade.get_packages_by_name("baz") - self.store.add_task("changer", {"type": "change-packages", - "hold": [foo.package.id, - bar.package.id, - baz.package.id], - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "hold": [foo.package.id, bar.package.id, baz.package.id], + "operation-id": 123, + }, + ) def assert_result(result): self.facade.reload_channels() self.assertEqual([], self.facade.get_package_holds()) - self.assertIn("Queuing response with change package results " - "to exchange urgently.", self.logfile.getvalue()) + self.assertIn( + "Queuing response with change package results " + "to exchange urgently.", + self.logfile.getvalue(), + ) self.assertMessages( self.get_pending_messages(), - [{"type": "change-packages-result", - "operation-id": 123, - "result-text": "Cannot perform the changes, since the " - "following packages are not installed: " - "%s, %s" % tuple(sorted([bar.package.name, - baz.package.name])), - "result-code": 100}]) + [ + { + "type": "change-packages-result", + "operation-id": 123, + "result-text": "Cannot perform the changes, since the " + "following packages are not installed: " + "%s, %s" + % tuple(sorted([bar.package.name, baz.package.name])), + "result-code": 100, + }, + ], + ) result = self.changer.handle_tasks() 
return result.addCallback(assert_result) @@ -1127,10 +1372,10 @@ synchronized message and a failure of the operation. """ - self.store.add_task("changer", - {"type": "change-packages", - "hold": [123], - "operation-id": 123}) + self.store.add_task( + "changer", + {"type": "change-packages", "hold": [123], "operation-id": 123}, + ) thetime = time.time() with patch("time.time") as time_mock: @@ -1138,17 +1383,22 @@ result = self.changer.handle_tasks() time_mock.assert_any_call() - self.assertIn("Package data not yet synchronized with server (123)", - self.logfile.getvalue()) + self.assertIn( + "Package data not yet synchronized with server (123)", + self.logfile.getvalue(), + ) def got_result(result): - message = {"type": "change-packages-result", - "operation-id": 123, - "result-code": 100, - "result-text": "Package data has changed. " - "Please retry the operation."} + message = { + "type": "change-packages-result", + "operation-id": 123, + "result-code": 100, + "result-text": "Package data has changed. " + "Please retry the operation.", + } self.assertMessages(self.get_pending_messages(), [message]) self.assertEqual(self.store.get_next_task("changer"), None) + return result.addCallback(got_result) def test_change_package_holds_delete_not_held(self): @@ -1162,21 +1412,34 @@ self.facade.reload_channels() self._hash_packages_by_name(self.facade, self.store, "foo") [foo] = self.facade.get_packages_by_name("foo") - self.store.add_task("changer", {"type": "change-packages", - "remove-hold": [foo.package.id], - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "remove-hold": [foo.package.id], + "operation-id": 123, + }, + ) def assert_result(result): self.facade.reload_channels() self.assertEqual([], self.facade.get_package_holds()) - self.assertIn("Queuing response with change package results " - "to exchange urgently.", self.logfile.getvalue()) + self.assertIn( + "Queuing response with change package results " + "to exchange urgently.", + self.logfile.getvalue(), + ) self.assertMessages( self.get_pending_messages(), - [{"type": "change-packages-result", - "operation-id": 123, - "result-text": "Package holds successfully changed.", - "result-code": 1}]) + [ + { + "type": "change-packages-result", + "operation-id": 123, + "result-text": "Package holds successfully changed.", + "result-code": 1, + }, + ], + ) result = self.changer.handle_tasks() return result.addCallback(assert_result) @@ -1189,31 +1452,47 @@ hold is removed. 
""" self._add_system_package("foo", version="1.0") - self._add_package_to_deb_dir( - self.repository_dir, "foo", version="2.0") + self._add_package_to_deb_dir(self.repository_dir, "foo", version="2.0") self.facade.reload_channels() [foo1, foo2] = sorted(self.facade.get_packages_by_name("foo")) - self.store.set_hash_ids({self.facade.get_package_hash(foo1): 1, - self.facade.get_package_hash(foo2): 2}) + self.store.set_hash_ids( + { + self.facade.get_package_hash(foo1): 1, + self.facade.get_package_hash(foo2): 2, + }, + ) self.facade.mark_install(foo1) self.facade.mark_hold(foo1) self.facade.perform_changes() self.facade.reload_channels() - self.store.add_task("changer", {"type": "change-packages", - "remove-hold": [2], - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "remove-hold": [2], + "operation-id": 123, + }, + ) def assert_result(result): self.facade.reload_channels() self.assertEqual(["foo"], self.facade.get_package_holds()) - self.assertIn("Queuing response with change package results " - "to exchange urgently.", self.logfile.getvalue()) + self.assertIn( + "Queuing response with change package results " + "to exchange urgently.", + self.logfile.getvalue(), + ) self.assertMessages( self.get_pending_messages(), - [{"type": "change-packages-result", - "operation-id": 123, - "result-text": "Package holds successfully changed.", - "result-code": 1}]) + [ + { + "type": "change-packages-result", + "operation-id": 123, + "result-text": "Package holds successfully changed.", + "result-code": 1, + }, + ], + ) result = self.changer.handle_tasks() return result.addCallback(assert_result) @@ -1229,21 +1508,34 @@ self.facade.reload_channels() self._hash_packages_by_name(self.facade, self.store, "foo") [foo] = self.facade.get_packages_by_name("foo") - self.store.add_task("changer", {"type": "change-packages", - "remove-hold": [foo.package.id], - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "remove-hold": [foo.package.id], + "operation-id": 123, + }, + ) def assert_result(result): self.facade.reload_channels() self.assertEqual([], self.facade.get_package_holds()) - self.assertIn("Queuing response with change package results " - "to exchange urgently.", self.logfile.getvalue()) + self.assertIn( + "Queuing response with change package results " + "to exchange urgently.", + self.logfile.getvalue(), + ) self.assertMessages( self.get_pending_messages(), - [{"type": "change-packages-result", - "operation-id": 123, - "result-text": "Package holds successfully changed.", - "result-code": 1}]) + [ + { + "type": "change-packages-result", + "operation-id": 123, + "result-text": "Package holds successfully changed.", + "result-code": 1, + }, + ], + ) result = self.changer.handle_tasks() return result.addCallback(assert_result) @@ -1254,19 +1546,31 @@ change-package-locks activities, since it can't add or remove locks because apt doesn't support this. 
""" - self.store.add_task("changer", {"type": "change-package-locks", - "create": [("foo", ">=", "1.0")], - "delete": [("bar", None, None)], - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-package-locks", + "create": [("foo", ">=", "1.0")], + "delete": [("bar", None, None)], + "operation-id": 123, + }, + ) def assert_result(result): self.assertMessages( self.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 123, - "status": FAILED, - "result-text": "This client doesn't support package locks.", - "result-code": 1}]) + [ + { + "type": "operation-result", + "operation-id": 123, + "status": FAILED, + "result-text": ( + "This client doesn't support package locks." + ), + "result-code": 1, + }, + ], + ) result = self.changer.handle_tasks() return result.addCallback(assert_result) @@ -1276,27 +1580,40 @@ After the C{change-packages} handler has installed the binaries, the binaries and the internal facade deb source is removed. """ - self.store.add_task("changer", - {"type": "change-packages", "install": [2], - "binaries": [(HASH2, 2, PKGDEB2)], - "operation-id": 123}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "install": [2], + "binaries": [(HASH2, 2, PKGDEB2)], + "operation-id": 123, + }, + ) def return_good_result(self): return "Yeah, I did whatever you've asked for!" + self.replace_perform_changes(return_good_result) result = self.changer.handle_tasks() def got_result(result): - self.assertMessages(self.get_pending_messages(), - [{"operation-id": 123, - "result-code": 1, - "result-text": "Yeah, I did whatever you've " - "asked for!", - "type": "change-packages-result"}]) + self.assertMessages( + self.get_pending_messages(), + [ + { + "operation-id": 123, + "result-code": 1, + "result-text": "Yeah, I did whatever you've " + "asked for!", + "type": "change-packages-result", + }, + ], + ) self.assertEqual([], os.listdir(self.config.binaries_path)) self.assertFalse( - os.path.exists(self.facade._get_internal_sources_list())) + os.path.exists(self.facade._get_internal_sources_list()), + ) return result.addCallback(got_result) @@ -1306,27 +1623,41 @@ A C{ShutdownProtocolProcess} is created and the package result change is returned. """ - self.store.add_task("changer", - {"type": "change-packages", "install": [2], - "binaries": [(HASH2, 2, PKGDEB2)], - "operation-id": 123, - "reboot-if-necessary": True}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "install": [2], + "binaries": [(HASH2, 2, PKGDEB2)], + "operation-id": 123, + "reboot-if-necessary": True, + }, + ) def return_good_result(self): return "Yeah, I did whatever you've asked for!" 
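# --- Illustrative sketch, not part of the patch: once a change-packages task
# --- with binaries finishes, the tests above expect binaries_path to be
# --- emptied and the facade's temporary deb source removed. Under those
# --- assumptions (paths hypothetical):
import os

def cleanup_staged_binaries(binaries_path, internal_sources_list):
    # Remove every staged .deb, then the internal sources list if present.
    for name in os.listdir(binaries_path):
        os.remove(os.path.join(binaries_path, name))
    if os.path.exists(internal_sources_list):
        os.remove(internal_sources_list)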
+ self.replace_perform_changes(return_good_result) result = self.changer.handle_tasks() def got_result(result): - self.assertIn("Landscape is rebooting the system", - self.logfile.getvalue()) - self.assertMessages(self.get_pending_messages(), - [{"operation-id": 123, - "result-code": 1, - "result-text": "Yeah, I did whatever you've " - "asked for!", - "type": "change-packages-result"}]) + self.assertIn( + "Landscape is rebooting the system", + self.logfile.getvalue(), + ) + self.assertMessages( + self.get_pending_messages(), + [ + { + "operation-id": 123, + "result-code": 1, + "result-text": "Yeah, I did whatever you've " + "asked for!", + "type": "change-packages-result", + }, + ], + ) self.landscape_reactor.advance(5) [arguments] = self.process_factory.spawns @@ -1342,25 +1673,37 @@ A C{ShutdownProtocol} is created and the package result change is returned, even if the reboot fails. """ - self.store.add_task("changer", - {"type": "change-packages", "install": [2], - "binaries": [(HASH2, 2, PKGDEB2)], - "operation-id": 123, - "reboot-if-necessary": True}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "install": [2], + "binaries": [(HASH2, 2, PKGDEB2)], + "operation-id": 123, + "reboot-if-necessary": True, + }, + ) def return_good_result(self): return "Yeah, I did whatever you've asked for!" + self.replace_perform_changes(return_good_result) result = self.changer.handle_tasks() def got_result(result): - self.assertMessages(self.get_pending_messages(), - [{"operation-id": 123, - "result-code": 1, - "result-text": "Yeah, I did whatever you've " - "asked for!", - "type": "change-packages-result"}]) + self.assertMessages( + self.get_pending_messages(), + [ + { + "operation-id": 123, + "result-code": 1, + "result-text": "Yeah, I did whatever you've " + "asked for!", + "type": "change-packages-result", + }, + ], + ) self.log_helper.ignore_errors(ShutdownFailedError) self.landscape_reactor.advance(5) @@ -1374,14 +1717,20 @@ """ After initiating a reboot process, no more messages are exchanged. """ - self.store.add_task("changer", - {"type": "change-packages", "install": [2], - "binaries": [(HASH2, 2, PKGDEB2)], - "operation-id": 123, - "reboot-if-necessary": True}) + self.store.add_task( + "changer", + { + "type": "change-packages", + "install": [2], + "binaries": [(HASH2, 2, PKGDEB2)], + "operation-id": 123, + "reboot-if-necessary": True, + }, + ) def return_good_result(self): return "Yeah, I did whatever you've asked for!" + self.replace_perform_changes(return_good_result) result = self.changer.handle_tasks() @@ -1406,6 +1755,7 @@ """ Invoking L{PackageChanger.run} results in the session ID being fetched. 
""" + def assert_session_id(ignored): self.assertTrue(self.changer._session_id is not None) diff -Nru landscape-client-23.02/landscape/client/package/tests/test_releaseupgrader.py landscape-client-23.08/landscape/client/package/tests/test_releaseupgrader.py --- landscape-client-23.02/landscape/client/package/tests/test_releaseupgrader.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/package/tests/test_releaseupgrader.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,49 +1,61 @@ -import mock import os import signal import tarfile import unittest +from unittest import mock from twisted.internet import reactor -from twisted.internet.defer import succeed, fail, Deferred - +from twisted.internet.defer import Deferred +from twisted.internet.defer import fail +from twisted.internet.defer import succeed + +from landscape.client.manager.manager import FAILED +from landscape.client.manager.manager import SUCCEEDED +from landscape.client.package.releaseupgrader import main +from landscape.client.package.releaseupgrader import ReleaseUpgrader +from landscape.client.package.releaseupgrader import ( + ReleaseUpgraderConfiguration, +) +from landscape.client.tests.helpers import BrokerServiceHelper +from landscape.client.tests.helpers import LandscapeTest from landscape.lib.apt.package.store import PackageStore -from landscape.lib.gpg import InvalidGPGSignature from landscape.lib.fetch import HTTPCodeError -from landscape.lib.testing import LogKeeperHelper, EnvironSaverHelper -from landscape.client.package.releaseupgrader import ( - ReleaseUpgrader, ReleaseUpgraderConfiguration, main) -from landscape.client.tests.helpers import LandscapeTest, BrokerServiceHelper -from landscape.client.manager.manager import SUCCEEDED, FAILED +from landscape.lib.gpg import InvalidGPGSignature +from landscape.lib.testing import EnvironSaverHelper +from landscape.lib.testing import LogKeeperHelper class ReleaseUpgraderConfigurationTest(unittest.TestCase): - def test_upgrade_tool_directory(self): """ L{ReleaseUpgraderConfiguration.upgrade_tool_directory} returns the path to the directory holding the fetched upgrade-tool files. 
""" config = ReleaseUpgraderConfiguration() - self.assertEqual(config.upgrade_tool_directory, - os.path.join(config.package_directory, - "upgrade-tool")) + self.assertEqual( + config.upgrade_tool_directory, + os.path.join(config.package_directory, "upgrade-tool"), + ) class ReleaseUpgraderTest(LandscapeTest): - helpers = [LogKeeperHelper, - EnvironSaverHelper, BrokerServiceHelper] + helpers = [LogKeeperHelper, EnvironSaverHelper, BrokerServiceHelper] def setUp(self): - super(ReleaseUpgraderTest, self).setUp() + super().setUp() self.config = ReleaseUpgraderConfiguration() self.config.data_path = self.makeDir() os.mkdir(self.config.package_directory) os.mkdir(self.config.upgrade_tool_directory) self.store = PackageStore(self.makeFile()) - self.upgrader = ReleaseUpgrader(self.store, None, - self.remote, self.config, None) + self.upgrader = ReleaseUpgrader( + self.store, + None, + self.remote, + self.config, + None, + ) service = self.broker_service service.message_store.set_accepted_types(["operation-result"]) @@ -61,7 +73,8 @@ method_returns = { tarball_url: succeed(b"tarball"), - signature_url: succeed(b"signature")} + signature_url: succeed(b"signature"), + } def side_effect(param): return method_returns[param] @@ -69,17 +82,22 @@ fetch_mock.side_effect = side_effect os.rmdir(self.config.upgrade_tool_directory) - result = self.upgrader.fetch(tarball_url, - signature_url) + result = self.upgrader.fetch(tarball_url, signature_url) def check_result(ignored): directory = self.config.upgrade_tool_directory self.assertFileContent( - os.path.join(directory, "karmic.tar.gz"), b"tarball") + os.path.join(directory, "karmic.tar.gz"), + b"tarball", + ) self.assertFileContent( - os.path.join(directory, "karmic.tar.gz.gpg"), b"signature") - self.assertIn("INFO: Successfully fetched upgrade-tool files", - self.logfile.getvalue()) + os.path.join(directory, "karmic.tar.gz.gpg"), + b"signature", + ) + self.assertIn( + "INFO: Successfully fetched upgrade-tool files", + self.logfile.getvalue(), + ) calls = [mock.call(tarball_url), mock.call(signature_url)] fetch_mock.assert_has_calls(calls, any_order=True) @@ -97,22 +115,26 @@ method_returns = { tarball_url: succeed(b"tarball"), - signature_url: fail(HTTPCodeError(404, b"not found"))} + signature_url: fail(HTTPCodeError(404, b"not found")), + } def side_effect(param): return method_returns[param] fetch_mock.side_effect = side_effect - result = self.upgrader.fetch(tarball_url, - signature_url) + result = self.upgrader.fetch(tarball_url, signature_url) def check_failure(failure): - self.assertIn("WARNING: Couldn't fetch file from %s (Server return" - "ed HTTP code 404)" % signature_url, - self.logfile.getvalue()) - self.assertIn("WARNING: Couldn't fetch all upgrade-tool files", - self.logfile.getvalue()) + self.assertIn( + f"WARNING: Couldn't fetch file from {signature_url} " + "(Server returned HTTP code 404)", + self.logfile.getvalue(), + ) + self.assertIn( + "WARNING: Couldn't fetch all upgrade-tool files", + self.logfile.getvalue(), + ) calls = [mock.call(tarball_url), mock.call(signature_url)] fetch_mock.assert_has_calls(calls, any_order=True) @@ -120,8 +142,10 @@ result.addErrback(check_failure) return result - @mock.patch("landscape.client.package.releaseupgrader.gpg_verify", - return_value=succeed(True)) + @mock.patch( + "landscape.client.package.releaseupgrader.gpg_verify", + return_value=succeed(True), + ) def test_verify(self, gpg_mock): """ L{ReleaseUpgrader.verify} verifies the upgrade tool tarball against @@ -133,10 +157,14 @@ result = 
self.upgrader.verify(tarball_filename, signature_filename) def check_result(ignored): - self.assertIn("INFO: Successfully verified upgrade-tool tarball", - self.logfile.getvalue()) + self.assertIn( + "INFO: Successfully verified upgrade-tool tarball", + self.logfile.getvalue(), + ) gpg_mock.assert_called_once_with( - tarball_filename, signature_filename) + tarball_filename, + signature_filename, + ) result.addCallback(check_result) return result @@ -154,10 +182,15 @@ result = self.upgrader.verify(tarball_filename, signature_filename) def check_failure(failure): - self.assertIn("WARNING: Invalid signature for upgrade-tool " - "tarball: gpg error", self.logfile.getvalue()) + self.assertIn( + "WARNING: Invalid signature for upgrade-tool " + "tarball: gpg error", + self.logfile.getvalue(), + ) gpg_mock.assert_called_once_with( - tarball_filename, signature_filename) + tarball_filename, + signature_filename, + ) result.addCallback(self.fail) result.addErrback(check_failure) @@ -189,16 +222,22 @@ The L{ReleaseUpgrader.tweak} method adds the Landscape PPA repository to the list of available mirrors. """ - mirrors_filename = os.path.join(self.config.upgrade_tool_directory, - "mirrors.cfg") - self.makeFile(path=mirrors_filename, - content="ftp://ftp.lug.ro/ubuntu/\n") + mirrors_filename = os.path.join( + self.config.upgrade_tool_directory, + "mirrors.cfg", + ) + self.makeFile( + path=mirrors_filename, + content="ftp://ftp.lug.ro/ubuntu/\n", + ) def check_result(ignored): - self.assertFileContent(mirrors_filename, - b"ftp://ftp.lug.ro/ubuntu/\n" - b"http://ppa.launchpad.net/landscape/" - b"trunk/ubuntu/\n") + self.assertFileContent( + mirrors_filename, + b"ftp://ftp.lug.ro/ubuntu/\n" + b"http://ppa.launchpad.net/landscape/" + b"trunk/ubuntu/\n", + ) result = self.upgrader.tweak("hardy") result.addCallback(check_result) @@ -208,8 +247,7 @@ """ The default directory for the upgrade-tool logs is the system one. """ - self.assertEqual(self.upgrader.logs_directory, - "/var/log/dist-upgrade") + self.assertEqual(self.upgrader.logs_directory, "/var/log/dist-upgrade") def test_default_logs_limit(self): """ @@ -223,22 +261,28 @@ the process standard output, error and log files. 
""" self.upgrader.logs_directory = self.makeDir() - self.makeFile(basename="main.log", - dirname=self.upgrader.logs_directory, - content="main log") - self.makeFile(basename="apt.log", - dirname=self.upgrader.logs_directory, - content="apt log") + self.makeFile( + basename="main.log", + dirname=self.upgrader.logs_directory, + content="main log", + ) + self.makeFile( + basename="apt.log", + dirname=self.upgrader.logs_directory, + content="apt log", + ) text = self.upgrader.make_operation_result_text("stdout", "stderr") - self.assertEqual(text, - "=== Standard output ===\n\n" - "stdout\n\n" - "=== Standard error ===\n\n" - "stderr\n\n" - "=== apt.log ===\n\n" - "apt log\n\n" - "=== main.log ===\n\n" - "main log\n\n") + self.assertEqual( + text, + "=== Standard output ===\n\n" + "stdout\n\n" + "=== Standard error ===\n\n" + "stderr\n\n" + "=== apt.log ===\n\n" + "apt log\n\n" + "=== main.log ===\n\n" + "main log\n\n", + ) def test_make_operation_result_text_with_no_stderr(self): """ @@ -247,9 +291,7 @@ """ self.upgrader.logs_directory = self.makeDir() text = self.upgrader.make_operation_result_text("stdout", "") - self.assertEqual(text, - "=== Standard output ===\n\n" - "stdout\n\n") + self.assertEqual(text, "=== Standard output ===\n\n" "stdout\n\n") def test_make_operation_result_text_only_considers_log_files(self): """ @@ -260,11 +302,13 @@ self.upgrader.logs_directory = self.makeDir() self.makeDir(dirname=self.upgrader.logs_directory) text = self.upgrader.make_operation_result_text("stdout", "stderr") - self.assertEqual(text, - "=== Standard output ===\n\n" - "stdout\n\n" - "=== Standard error ===\n\n" - "stderr\n\n") + self.assertEqual( + text, + "=== Standard output ===\n\n" + "stdout\n\n" + "=== Standard error ===\n\n" + "stderr\n\n", + ) def test_make_operation_result_text_trims_long_files(self): """ @@ -273,17 +317,21 @@ """ self.upgrader.logs_directory = self.makeDir() self.upgrader.logs_limit = 8 - self.makeFile(basename="main.log", - dirname=self.upgrader.logs_directory, - content="very long log") + self.makeFile( + basename="main.log", + dirname=self.upgrader.logs_directory, + content="very long log", + ) text = self.upgrader.make_operation_result_text("stdout", "stderr") - self.assertEqual(text, - "=== Standard output ===\n\n" - "stdout\n\n" - "=== Standard error ===\n\n" - "stderr\n\n" - "=== main.log ===\n\n" - "long log\n\n") + self.assertEqual( + text, + "=== Standard output ===\n\n" + "stdout\n\n" + "=== Standard error ===\n\n" + "stderr\n\n" + "=== main.log ===\n\n" + "long log\n\n", + ) def test_upgrade(self): """ @@ -294,11 +342,13 @@ upgrade_tool_directory = self.config.upgrade_tool_directory upgrade_tool_filename = os.path.join(upgrade_tool_directory, "karmic") fd = open(upgrade_tool_filename, "w") - fd.write("#!/bin/sh\n" - "echo $@\n" - "echo FOO=$FOO\n" - "echo PWD=$PWD\n" - "echo out\n") + fd.write( + "#!/bin/sh\n" + "echo $@\n" + "echo FOO=$FOO\n" + "echo PWD=$PWD\n" + "echo out\n", + ) fd.close() os.chmod(upgrade_tool_filename, 0o755) env_backup = os.environ.copy() @@ -311,19 +361,29 @@ result = self.upgrader.upgrade("karmic", 100) def check_result(ignored): - self.assertIn("INFO: Queuing message with release upgrade " - "results to exchange urgently.", - self.logfile.getvalue()) - result_text = (u"=== Standard output ===\n\n" - "--frontend DistUpgradeViewNonInteractive\n" - "FOO=bar\n" - "PWD=%s\nout\n\n\n" % upgrade_tool_directory) - self.assertMessages(self.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 100, - "status": SUCCEEDED, - 
"result-text": result_text, - "result-code": 0}]) + self.assertIn( + "INFO: Queuing message with release upgrade " + "results to exchange urgently.", + self.logfile.getvalue(), + ) + result_text = ( + "=== Standard output ===\n\n" + "--frontend DistUpgradeViewNonInteractive\n" + "FOO=bar\n" + f"PWD={upgrade_tool_directory}\nout\n\n\n" + ) + self.assertMessages( + self.get_pending_messages(), + [ + { + "type": "operation-result", + "operation-id": 100, + "status": SUCCEEDED, + "result-text": result_text, + "result-code": 0, + }, + ], + ) result.addCallback(check_result) result.chainDeferred(deferred) @@ -345,10 +405,12 @@ upgrade_tool_directory = self.config.upgrade_tool_directory upgrade_tool_filename = os.path.join(upgrade_tool_directory, "karmic") fd = open(upgrade_tool_filename, "w") - fd.write("#!/bin/sh\n" - "echo DEBUG_UPDATE_MANAGER=$DEBUG_UPDATE_MANAGER\n" - "echo RELEASE_UPRADER_ALLOW_THIRD_PARTY=" - "$RELEASE_UPRADER_ALLOW_THIRD_PARTY\n") + fd.write( + "#!/bin/sh\n" + "echo DEBUG_UPDATE_MANAGER=$DEBUG_UPDATE_MANAGER\n" + "echo RELEASE_UPRADER_ALLOW_THIRD_PARTY=" + "$RELEASE_UPRADER_ALLOW_THIRD_PARTY\n", + ) fd.close() os.chmod(upgrade_tool_filename, 0o755) env_backup = os.environ.copy() @@ -357,20 +419,31 @@ def do_test(): - result = self.upgrader.upgrade("karmic", 100, - allow_third_party=True, - debug=True) + result = self.upgrader.upgrade( + "karmic", + 100, + allow_third_party=True, + debug=True, + ) def check_result(ignored): - result_text = (u"=== Standard output ===\n\n" - "DEBUG_UPDATE_MANAGER=True\n" - "RELEASE_UPRADER_ALLOW_THIRD_PARTY=True\n\n\n") - self.assertMessages(self.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 100, - "status": SUCCEEDED, - "result-text": result_text, - "result-code": 0}]) + result_text = ( + "=== Standard output ===\n\n" + "DEBUG_UPDATE_MANAGER=True\n" + "RELEASE_UPRADER_ALLOW_THIRD_PARTY=True\n\n\n" + ) + self.assertMessages( + self.get_pending_messages(), + [ + { + "type": "operation-result", + "operation-id": 100, + "status": SUCCEEDED, + "result-text": result_text, + "result-code": 0, + }, + ], + ) result.addCallback(check_result) result.chainDeferred(deferred) @@ -392,10 +465,7 @@ upgrade_tool_directory = self.config.upgrade_tool_directory upgrade_tool_filename = os.path.join(upgrade_tool_directory, "karmic") fd = open(upgrade_tool_filename, "w") - fd.write("#!/bin/sh\n" - "echo out\n" - "echo err >&2\n" - "exit 3") + fd.write("#!/bin/sh\n" "echo out\n" "echo err >&2\n" "exit 3") fd.close() os.chmod(upgrade_tool_filename, 0o755) @@ -406,14 +476,22 @@ result = self.upgrader.upgrade("karmic", 100) def check_result(ignored): - result_text = (u"=== Standard output ===\n\nout\n\n\n" - "=== Standard error ===\n\nerr\n\n\n") - self.assertMessages(self.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 100, - "status": FAILED, - "result-text": result_text, - "result-code": 3}]) + result_text = ( + "=== Standard output ===\n\nout\n\n\n" + "=== Standard error ===\n\nerr\n\n\n" + ) + self.assertMessages( + self.get_pending_messages(), + [ + { + "type": "operation-result", + "operation-id": 100, + "status": FAILED, + "result-text": result_text, + "result-code": 3, + }, + ], + ) result.addCallback(check_result) result.chainDeferred(deferred) @@ -433,24 +511,26 @@ upgrade_tool_filename = os.path.join(upgrade_tool_directory, "karmic") child_pid_filename = self.makeFile() fd = open(upgrade_tool_filename, "w") - fd.write("#!/usr/bin/env python3\n" - "import os\n" - "import time\n" - "import sys\n" - "if 
__name__ == '__main__':\n" - " print('First parent')\n" - " pid = os.fork()\n" - " if pid > 0:\n" - " time.sleep(0.5)\n" - " sys.exit(0)\n" - " pid = os.fork()\n" - " if pid > 0:\n" - " fd = open('%s', 'w')\n" - " fd.write(str(pid))\n" - " fd.close()\n" - " sys.exit(0)\n" - " while True:\n" - " time.sleep(2)\n" % child_pid_filename) + fd.write( + "#!/usr/bin/env python3\n" + "import os\n" + "import time\n" + "import sys\n" + "if __name__ == '__main__':\n" + " print('First parent')\n" + " pid = os.fork()\n" + " if pid > 0:\n" + " time.sleep(0.5)\n" + " sys.exit(0)\n" + " pid = os.fork()\n" + " if pid > 0:\n" + f" fd = open('{child_pid_filename}', 'w')\n" + " fd.write(str(pid))\n" + " fd.close()\n" + " sys.exit(0)\n" + " while True:\n" + " time.sleep(2)\n", + ) fd.close() os.chmod(upgrade_tool_filename, 0o755) os.environ.clear() @@ -462,7 +542,7 @@ result = self.upgrader.upgrade("karmic", 100) def kill_child(how): - fd = open(child_pid_filename, "r") + fd = open(child_pid_filename) child_pid = int(fd.read()) fd.close() os.remove(child_pid_filename) @@ -477,9 +557,11 @@ def check_result(ignored): force_kill_child.cancel() - self.assertIn("INFO: Queuing message with release upgrade " - "results to exchange urgently.", - self.logfile.getvalue()) + self.assertIn( + "INFO: Queuing message with release upgrade " + "results to exchange urgently.", + self.logfile.getvalue(), + ) kill_child("cleanly") result_text = self.get_pending_messages()[0]["result-text"] self.assertIn("First parent\n", result_text) @@ -509,9 +591,8 @@ self.write_script( self.config, "landscape-package-reporter", - "#!/bin/sh\n" - "echo $@\n" - "echo $(pwd)\n") + "#!/bin/sh\n" "echo $@\n" "echo $(pwd)\n", + ) deferred = Deferred() @@ -523,7 +604,8 @@ self.assertFalse(os.path.exists(upgrade_tool_directory)) self.assertEqual( out, - ("--force-apt-update\n%s\n" % os.getcwd()).encode("utf-8")) + (f"--force-apt-update\n{os.getcwd()}\n").encode("utf-8"), + ) self.assertEqual(err, b"") self.assertEqual(code, 0) @@ -543,18 +625,28 @@ the landscape uid and gid. 
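The fake pwd and grp entries defined next stand in for that user, and the
replacement spawn_process() asserts that the reporter is spawned with
exactly those ids (uid 1234, gid 5678).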
""" - class FakePwNam(object): + class FakePwNam: pw_uid = 1234 + getpwnam_mock.return_value = FakePwNam() - class FakeGrNam(object): + class FakeGrNam: gr_gid = 5678 + getgrnam_mock.return_value = FakeGrNam() spawn_process_calls = [] - def spawn_process(pp, reporter, args=None, uid=None, gid=None, - path=None, env=None, usePTY=None): + def spawn_process( + pp, + reporter, + args=None, + uid=None, + gid=None, + path=None, + env=None, + usePTY=None, + ): self.assertEqual(uid, 1234) self.assertEqual(gid, 5678) spawn_process_calls.append(True) @@ -580,7 +672,8 @@ self.write_script( self.config, "landscape-package-reporter", - "#!/bin/sh\necho $@\n") + "#!/bin/sh\necho $@\n", + ) self.config.config = "/some/config" deferred = Deferred() @@ -590,8 +683,10 @@ def check_result(args): out, err, code = args - self.assertEqual(out, b"--force-apt-update " - b"--config=/some/config\n") + self.assertEqual( + out, + b"--force-apt-update " b"--config=/some/config\n", + ) self.assertEqual(err, b"") self.assertEqual(code, 0) @@ -616,23 +711,34 @@ return succeed(None) def verify(tarball_filename, signature_filename): - self.assertEqual(tarball_filename, - os.path.join(upgrade_tool_directory, "tarball")) - self.assertEqual(signature_filename, - os.path.join(upgrade_tool_directory, "sign")) + self.assertEqual( + tarball_filename, + os.path.join(upgrade_tool_directory, "tarball"), + ) + self.assertEqual( + signature_filename, + os.path.join(upgrade_tool_directory, "sign"), + ) calls.append("verify") def extract(filename_tarball): - self.assertEqual(filename_tarball, - os.path.join(upgrade_tool_directory, "tarball")) + self.assertEqual( + filename_tarball, + os.path.join(upgrade_tool_directory, "tarball"), + ) calls.append("extract") def tweak(current_code_name): self.assertEqual(current_code_name, "jaunty") calls.append("tweak") - def upgrade(code_name, operation_id, allow_third_party=False, - debug=False, mode=None): + def upgrade( + code_name, + operation_id, + allow_third_party=False, + debug=False, + mode=None, + ): self.assertEqual(operation_id, 100) self.assertEqual(code_name, "karmic") self.assertTrue(allow_third_party) @@ -651,20 +757,25 @@ self.upgrader.finish = finish self.upgrader.lsb_release_filename = self.makeFile( - "DISTRIB_CODENAME=jaunty\n") + "DISTRIB_CODENAME=jaunty\n", + ) - message = {"type": "release-upgrade", - "code-name": "karmic", - "upgrade-tool-tarball-url": "http://some/tarball", - "upgrade-tool-signature-url": "http://some/sign", - "allow-third-party": True, - "operation-id": 100} + message = { + "type": "release-upgrade", + "code-name": "karmic", + "upgrade-tool-tarball-url": "http://some/tarball", + "upgrade-tool-signature-url": "http://some/sign", + "allow-third-party": True, + "operation-id": 100, + } result = self.upgrader.handle_release_upgrade(message) def check_result(ignored): - self.assertEqual(calls, ["fetch", "verify", "extract", "tweak", - "upgrade", "finish"]) + self.assertEqual( + calls, + ["fetch", "verify", "extract", "tweak", "upgrade", "finish"], + ) result.addCallback(check_result) return result @@ -675,25 +786,36 @@ failure if the system is already running the desired release. 
""" self.upgrader.lsb_release_filename = self.makeFile( - "DISTRIB_CODENAME=karmic\n") + "DISTRIB_CODENAME=karmic\n", + ) - message = {"type": "release-upgrade", - "code-name": "karmic", - "operation-id": 100} + message = { + "type": "release-upgrade", + "code-name": "karmic", + "operation-id": 100, + } result = self.upgrader.handle_release_upgrade(message) def check_result(ignored): - self.assertIn("INFO: Queuing message with release upgrade " - "failure to exchange urgently.", - self.logfile.getvalue()) - self.assertMessages(self.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 100, - "status": FAILED, - "result-text": "The system is already " - "running karmic.", - "result-code": 1}]) + self.assertIn( + "INFO: Queuing message with release upgrade " + "failure to exchange urgently.", + self.logfile.getvalue(), + ) + self.assertMessages( + self.get_pending_messages(), + [ + { + "type": "operation-result", + "operation-id": 100, + "status": FAILED, + "result-text": "The system is already " + "running karmic.", + "result-code": 1, + }, + ], + ) result.addCallback(check_result) return result @@ -704,7 +826,8 @@ failure if any of the helper method errbacks. """ self.upgrader.lsb_release_filename = self.makeFile( - "DISTRIB_CODENAME=jaunty\n") + "DISTRIB_CODENAME=jaunty\n", + ) calls = [] @@ -735,24 +858,34 @@ self.upgrader.upgrade = upgrade self.upgrader.finish = finish - message = {"type": "release-upgrade", - "code-name": "karmic", - "operation-id": 100, - "upgrade-tool-tarball-url": "http://some/tarball", - "upgrade-tool-signature-url": "http://some/signature"} + message = { + "type": "release-upgrade", + "code-name": "karmic", + "operation-id": 100, + "upgrade-tool-tarball-url": "http://some/tarball", + "upgrade-tool-signature-url": "http://some/signature", + } result = self.upgrader.handle_release_upgrade(message) def check_result(ignored): - self.assertIn("INFO: Queuing message with release upgrade " - "failure to exchange urgently.", - self.logfile.getvalue()) - self.assertMessages(self.get_pending_messages(), - [{"type": "operation-result", - "operation-id": 100, - "status": FAILED, - "result-text": "failure", - "result-code": 1}]) + self.assertIn( + "INFO: Queuing message with release upgrade " + "failure to exchange urgently.", + self.logfile.getvalue(), + ) + self.assertMessages( + self.get_pending_messages(), + [ + { + "type": "operation-result", + "operation-id": 100, + "status": FAILED, + "result-text": "failure", + "result-code": 1, + }, + ], + ) self.assertEqual(calls, ["fetch", "verify"]) result.addCallback(check_result) @@ -767,7 +900,7 @@ message = {"type": "release-upgrade"} - class FakeTask(object): + class FakeTask: data = message task = FakeTask() @@ -780,15 +913,17 @@ """ message = {"type": "foo"} - class FakeTask(object): + class FakeTask: data = message self.assertEqual(self.upgrader.handle_task(FakeTask()), None) @mock.patch("os.setsid") @mock.patch("os.getpgrp") - @mock.patch("landscape.client.package.releaseupgrader.run_task_handler", - return_value="RESULT") + @mock.patch( + "landscape.client.package.releaseupgrader.run_task_handler", + return_value="RESULT", + ) def test_main(self, run_task_handler, getpgrp_mock, setsid_mock): """ The L{main} function creates a new session if the process is not diff -Nru landscape-client-23.02/landscape/client/package/tests/test_reporter.py landscape-client-23.08/landscape/client/package/tests/test_reporter.py --- landscape-client-23.02/landscape/client/package/tests/test_reporter.py 2023-02-07 
18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/package/tests/test_reporter.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,46 +1,59 @@ import locale -import sys import os -import time -import apt_pkg -import mock import shutil import subprocess +import sys +import time +from unittest import mock -from twisted.internet.defer import Deferred, succeed, fail, inlineCallbacks +import apt_pkg from twisted.internet import reactor +from twisted.internet.defer import Deferred +from twisted.internet.defer import fail +from twisted.internet.defer import inlineCallbacks +from twisted.internet.defer import succeed - +from landscape.client.package import reporter +from landscape.client.package.reporter import FakeGlobalReporter +from landscape.client.package.reporter import FakeReporter +from landscape.client.package.reporter import find_reporter_command +from landscape.client.package.reporter import HASH_ID_REQUEST_TIMEOUT +from landscape.client.package.reporter import main +from landscape.client.package.reporter import PackageReporter +from landscape.client.package.reporter import PackageReporterConfiguration +from landscape.client.tests.helpers import BrokerServiceHelper +from landscape.client.tests.helpers import LandscapeTest from landscape.lib import bpickle from landscape.lib.apt.package.facade import AptFacade -from landscape.lib.apt.package.store import ( - PackageStore, UnknownHashIDRequest, FakePackageStore) -from landscape.lib.apt.package.testing import ( - AptFacadeHelper, SimpleRepositoryHelper, - HASH1, HASH2, HASH3, PKGNAME1) -from landscape.lib.fs import create_text_file, touch_file +from landscape.lib.apt.package.store import FakePackageStore +from landscape.lib.apt.package.store import PackageStore +from landscape.lib.apt.package.store import UnknownHashIDRequest +from landscape.lib.apt.package.testing import AptFacadeHelper +from landscape.lib.apt.package.testing import HASH1 +from landscape.lib.apt.package.testing import HASH2 +from landscape.lib.apt.package.testing import HASH3 +from landscape.lib.apt.package.testing import PKGNAME1 +from landscape.lib.apt.package.testing import SimpleRepositoryHelper from landscape.lib.fetch import FetchError -from landscape.lib.lsb_release import parse_lsb_release, LSB_RELEASE_FILENAME -from landscape.lib.testing import EnvironSaverHelper, FakeReactor -from landscape.client.package.reporter import ( - PackageReporter, HASH_ID_REQUEST_TIMEOUT, main, find_reporter_command, - PackageReporterConfiguration, FakeGlobalReporter, FakeReporter) -from landscape.client.package import reporter -from landscape.client.tests.helpers import LandscapeTest, BrokerServiceHelper +from landscape.lib.fs import create_text_file +from landscape.lib.fs import touch_file +from landscape.lib.lsb_release import LSB_RELEASE_FILENAME +from landscape.lib.lsb_release import parse_lsb_release +from landscape.lib.testing import EnvironSaverHelper +from landscape.lib.testing import FakeReactor SAMPLE_LSB_RELEASE = "DISTRIB_CODENAME=codename\n" class PackageReporterConfigurationTest(LandscapeTest): - def test_force_apt_update_option(self): """ The L{PackageReporterConfiguration} supports a '--force-apt-update' command line option. 
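A minimal sketch of the behaviour under test (ignoring the temporary
config file the test points default_config_filenames at):

    config = PackageReporterConfiguration()
    config.force_apt_update              # False by default
    config.load(["--force-apt-update"])
    config.force_apt_update              # now True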
""" config = PackageReporterConfiguration() - config.default_config_filenames = (self.makeFile(""), ) + config.default_config_filenames = (self.makeFile(""),) self.assertFalse(config.force_apt_update) config.load(["--force-apt-update"]) self.assertTrue(config.force_apt_update) @@ -53,12 +66,17 @@ Facade = AptFacade def setUp(self): - super(PackageReporterAptTest, self).setUp() + super().setUp() self.store = PackageStore(self.makeFile()) self.config = PackageReporterConfiguration() self.reactor = FakeReactor() self.reporter = PackageReporter( - self.store, self.facade, self.remote, self.config, self.reactor) + self.store, + self.facade, + self.remote, + self.config, + self.reactor, + ) self.reporter.get_session_id() # Assume update-notifier-common stamp file is not present by # default. @@ -77,7 +95,10 @@ Return the hash of the package that upgrades "name1". """ self._add_package_to_deb_dir( - self.repository_dir, "name1", version="version2") + self.repository_dir, + "name1", + version="version2", + ) self.facade.reload_channels() name1_upgrade = sorted(self.facade.get_packages_by_name("name1"))[1] return self.facade.get_package_hash(name1_upgrade) @@ -99,9 +120,10 @@ """Create a fake apt-update executable""" self.reporter.apt_update_filename = self.makeFile( "#!/bin/sh\n" - "echo -n '%s'\n" - "echo -n '%s' >&2\n" - "exit %d" % (out, err, code)) + f"echo -n '{out}'\n" + f"echo -n '{err}' >&2\n" + f"exit {code:d}", + ) os.chmod(self.reporter.apt_update_filename, 0o755) def test_set_package_ids_with_all_known(self): @@ -109,9 +131,14 @@ request2 = self.store.add_hash_id_request([b"hash3", b"hash4"]) self.store.add_hash_id_request([b"hash5", b"hash6"]) - self.store.add_task("reporter", - {"type": "package-ids", "ids": [123, 456], - "request-id": request2.id}) + self.store.add_task( + "reporter", + { + "type": "package-ids", + "ids": [123, 456], + "request-id": request2.id, + }, + ) def got_result(result): self.assertEqual(self.store.get_hash_id(b"hash1"), None) @@ -126,18 +153,20 @@ def test_set_package_ids_with_unknown_request_id(self): - self.store.add_task("reporter", - {"type": "package-ids", "ids": [123, 456], - "request-id": 123}) + self.store.add_task( + "reporter", + {"type": "package-ids", "ids": [123, 456], "request-id": 123}, + ) # Nothing bad should happen. 
return self.reporter.handle_tasks() def test_set_package_ids_py27(self): """Check py27 upgraded messages are decoded.""" - self.store.add_task("reporter", - {"type": b"package-ids", "ids": [123, 456], - "request-id": 123}) + self.store.add_task( + "reporter", + {"type": b"package-ids", "ids": [123, 456], "request-id": 123}, + ) result = self.reporter.handle_tasks() self.assertIsInstance(result, Deferred) @@ -147,7 +176,7 @@ self.store.add_task("reporter", {"type": "spam"}) result = self.reporter.handle_tasks() self.assertIsInstance(result, Deferred) - expected = "Unknown task message type: {!r}".format(u"spam") # py2/3 + expected = "Unknown task message type: {!r}".format("spam") # py2/3 mock_warn.assert_called_once_with(expected) def test_set_package_ids_with_unknown_hashes(self): @@ -157,10 +186,14 @@ request1 = self.store.add_hash_id_request([b"foo", HASH1, b"bar"]) - self.store.add_task("reporter", - {"type": "package-ids", - "ids": [123, None, 456], - "request-id": request1.id}) + self.store.add_task( + "reporter", + { + "type": "package-ids", + "ids": [123, None, 456], + "request-id": request1.id, + }, + ) def got_result(result): message = message_store.get_pending_messages()[0] @@ -181,26 +214,39 @@ self.assertMessages( message_store.get_pending_messages(), - [{"packages": [{"description": u"Description1", + [ + { + "packages": [ + { + "description": "Description1", "installed-size": 28672, - "name": u"name1", - "relations": - [(131074, u"providesname1"), - (196610, u"name1 = version1-release1"), - (262148, - u"prerequirename1 = prerequireversion1"), - (262148, - u"requirename1 = requireversion1"), - (393224, u"name1 < version1-release1"), - (458768, - u"conflictsname1 = conflictsversion1")], - "section": u"Group1", + "name": "name1", + "relations": [ + (131074, "providesname1"), + (196610, "name1 = version1-release1"), + ( + 262148, + "prerequirename1 = prerequireversion1", + ), + (262148, "requirename1 = requireversion1"), + (393224, "name1 < version1-release1"), + ( + 458768, + "conflictsname1 = conflictsversion1", + ), + ], + "section": "Group1", "size": 1038, - "summary": u"Summary1", + "summary": "Summary1", "type": 65537, - "version": u"version1-release1"}], - "request-id": request2.id, - "type": "add-packages"}]) + "version": "version1-release1", + }, + ], + "request-id": request2.id, + "type": "add-packages", + }, + ], + ) deferred = self.reporter.handle_tasks() return deferred.addCallback(got_result) @@ -212,10 +258,14 @@ request1 = self.store.add_hash_id_request([b"foo", HASH1, b"bar"]) - self.store.add_task("reporter", - {"type": "package-ids", - "ids": [123, None, 456], - "request-id": request1.id}) + self.store.add_task( + "reporter", + { + "type": "package-ids", + "ids": [123, None, 456], + "request-id": request1.id, + }, + ) def got_result(result, mocked_get_package_skeleton): self.assertTrue(mocked_get_package_skeleton.called) @@ -223,25 +273,34 @@ request2 = self.store.get_hash_id_request(message["request-id"]) self.assertMessages( message_store.get_pending_messages(), - [{"packages": [{"description": u"Description1", + [ + { + "packages": [ + { + "description": "Description1", "installed-size": None, - "name": u"name1", + "name": "name1", "relations": [], - "section": u"Group1", + "section": "Group1", "size": None, - "summary": u"Summary1", + "summary": "Summary1", "type": 65537, - "version": u"version1-release1"}], - "request-id": request2.id, - "type": "add-packages"}]) + "version": "version1-release1", + }, + ], + "request-id": request2.id, + "type": 
"add-packages", + }, + ], + ) - class FakePackage(object): + class FakePackage: type = 65537 - name = u"name1" - version = u"version1-release1" - section = u"Group1" - summary = u"Summary1" - description = u"Description1" + name = "name1" + version = "version1-release1" + section = "Group1" + summary = "Summary1" + description = "Description1" size = None installed_size = None relations = [] @@ -249,8 +308,11 @@ def get_hash(self): return HASH1 # Need to match the hash of the initial request - with mock.patch.object(self.Facade, "get_package_skeleton", - return_value=FakePackage()) as mocked: + with mock.patch.object( + self.Facade, + "get_package_skeleton", + return_value=FakePackage(), + ) as mocked: deferred = self.reporter.handle_tasks() return deferred.addCallback(got_result, mocked) @@ -266,19 +328,27 @@ request_id = self.store.add_hash_id_request([b"foo", HASH1, b"bar"]).id - self.store.add_task("reporter", {"type": "package-ids", - "ids": [123, None, 456], - "request-id": request_id}) + self.store.add_task( + "reporter", + { + "type": "package-ids", + "ids": [123, None, 456], + "request-id": request_id, + }, + ) def got_result(result, send_mock): send_mock.assert_called_once_with(mock.ANY, mock.ANY, True) self.assertMessages(message_store.get_pending_messages(), []) self.assertEqual( [request.id for request in self.store.iter_hash_id_requests()], - [request_id]) + [request_id], + ) with mock.patch.object( - self.reporter._broker, "send_message") as send_mock: + self.reporter._broker, + "send_message", + ) as send_mock: send_mock.return_value = deferred result = self.reporter.handle_tasks() self.assertFailure(result, Boom) @@ -286,18 +356,25 @@ def test_set_package_ids_removes_request_id_when_done(self): request = self.store.add_hash_id_request([b"hash1"]) - self.store.add_task("reporter", {"type": "package-ids", "ids": [123], - "request-id": request.id}) + self.store.add_task( + "reporter", + {"type": "package-ids", "ids": [123], "request-id": request.id}, + ) def got_result(result): - self.assertRaises(UnknownHashIDRequest, - self.store.get_hash_id_request, request.id) + self.assertRaises( + UnknownHashIDRequest, + self.store.get_hash_id_request, + request.id, + ) deferred = self.reporter.handle_tasks() return deferred.addCallback(got_result) - @mock.patch("landscape.client.package.reporter.fetch_async", - return_value=succeed(b"hash-ids")) + @mock.patch( + "landscape.client.package.reporter.fetch_async", + return_value=succeed(b"hash-ids"), + ) @mock.patch("logging.info", return_value=None) def test_fetch_hash_id_db(self, logging_mock, mock_fetch_async): @@ -305,8 +382,12 @@ self.config.data_path = self.makeDir() self.config.package_hash_id_url = "http://fake.url/path/" os.makedirs(os.path.join(self.config.data_path, "package", "hash-id")) - hash_id_db_filename = os.path.join(self.config.data_path, "package", - "hash-id", "uuid_codename_arch") + hash_id_db_filename = os.path.join( + self.config.data_path, + "package", + "hash-id", + "uuid_codename_arch", + ) # Fake uuid, codename and arch message_store = self.broker_service.message_store @@ -326,16 +407,23 @@ def callback(ignored): self.assertTrue(os.path.exists(hash_id_db_filename)) self.assertEqual(open(hash_id_db_filename).read(), "hash-ids") + result.addCallback(callback) logging_mock.assert_called_once_with( - "Downloaded hash=>id database from %s" % hash_id_db_url) + f"Downloaded hash=>id database from {hash_id_db_url}", + ) mock_fetch_async.assert_called_once_with( - hash_id_db_url, cainfo=None, proxy=None) + 
hash_id_db_url, + cainfo=None, + proxy=None, + ) return result - @mock.patch("landscape.client.package.reporter.fetch_async", - return_value=succeed(b"hash-ids")) + @mock.patch( + "landscape.client.package.reporter.fetch_async", + return_value=succeed(b"hash-ids"), + ) @mock.patch("logging.info", return_value=None) def test_fetch_hash_id_db_with_proxy(self, logging_mock, mock_fetch_async): """fetching hash-id-db uses proxy settings""" @@ -358,7 +446,10 @@ result = self.reporter.fetch_hash_id_db() mock_fetch_async.assert_called_once_with( - hash_id_db_url, cainfo=None, proxy="http://helloproxy:8000") + hash_id_db_url, + cainfo=None, + proxy="http://helloproxy:8000", + ) return result @mock.patch("landscape.client.package.reporter.fetch_async") @@ -368,8 +459,12 @@ self.config.package_hash_id_url = "http://fake.url/path/" self.config.data_path = self.makeDir() os.makedirs(os.path.join(self.config.data_path, "package", "hash-id")) - hash_id_db_filename = os.path.join(self.config.data_path, "package", - "hash-id", "uuid_codename_arch") + hash_id_db_filename = os.path.join( + self.config.data_path, + "package", + "hash-id", + "uuid_codename_arch", + ) open(hash_id_db_filename, "w").write("test") # Fake uuid, codename and arch @@ -403,7 +498,8 @@ result = self.reporter.fetch_hash_id_db() logging_mock.assert_called_once_with( "Couldn't determine which hash=>id database to use: " - "server UUID not available") + "server UUID not available", + ) return result @mock.patch("logging.warning", return_value=None) @@ -420,7 +516,8 @@ logging_mock.assert_called_once_with( "Couldn't determine which hash=>id database to use: " - "missing code-name key in %s" % self.reporter.lsb_release_filename) + f"missing code-name key in {self.reporter.lsb_release_filename}", + ) return result @mock.patch("logging.warning", return_value=None) @@ -437,19 +534,26 @@ result = self.reporter.fetch_hash_id_db() logging_mock.assert_called_once_with( "Couldn't determine which hash=>id database to use: " - "unknown dpkg architecture") + "unknown dpkg architecture", + ) return result - @mock.patch("landscape.client.package.reporter.fetch_async", - return_value=succeed(b"hash-ids")) + @mock.patch( + "landscape.client.package.reporter.fetch_async", + return_value=succeed(b"hash-ids"), + ) def test_fetch_hash_id_db_with_default_url(self, mock_fetch_async): # Let's say package_hash_id_url is not set but url is self.config.data_path = self.makeDir() self.config.package_hash_id_url = None self.config.url = "http://fake.url/path/message-system/" os.makedirs(os.path.join(self.config.data_path, "package", "hash-id")) - hash_id_db_filename = os.path.join(self.config.data_path, "package", - "hash-id", "uuid_codename_arch") + hash_id_db_filename = os.path.join( + self.config.data_path, + "package", + "hash-id", + "uuid_codename_arch", + ) # Fake uuid, codename and arch message_store = self.broker_service.message_store @@ -458,24 +562,34 @@ self.facade.set_arch("arch") # Check fetch_async is called with the default url - hash_id_db_url = "http://fake.url/path/hash-id-databases/" \ - "uuid_codename_arch" + hash_id_db_url = ( + "http://fake.url/path/hash-id-databases/" "uuid_codename_arch" + ) result = self.reporter.fetch_hash_id_db() # Check the database def callback(ignored): self.assertTrue(os.path.exists(hash_id_db_filename)) self.assertEqual(open(hash_id_db_filename).read(), "hash-ids") + result.addCallback(callback) mock_fetch_async.assert_called_once_with( - hash_id_db_url, cainfo=None, proxy=None) + hash_id_db_url, + cainfo=None, + 
proxy=None, + ) return result - @mock.patch("landscape.client.package.reporter.fetch_async", - return_value=fail(FetchError("fetch error"))) + @mock.patch( + "landscape.client.package.reporter.fetch_async", + return_value=fail(FetchError("fetch error")), + ) @mock.patch("logging.warning", return_value=None) def test_fetch_hash_id_db_with_download_error( - self, logging_mock, mock_fetch_async): + self, + logging_mock, + mock_fetch_async, + ): # Assume package_hash_id_url is set self.config.data_path = self.makeDir() @@ -495,15 +609,23 @@ # We shouldn't have any hash=>id database def callback(ignored): hash_id_db_filename = os.path.join( - self.config.data_path, "package", "hash-id", - "uuid_codename_arch") + self.config.data_path, + "package", + "hash-id", + "uuid_codename_arch", + ) self.assertEqual(os.path.exists(hash_id_db_filename), False) + result.addCallback(callback) logging_mock.assert_called_once_with( - "Couldn't download hash=>id database: fetch error") + "Couldn't download hash=>id database: fetch error", + ) mock_fetch_async.assert_called_once_with( - hash_id_db_url, cainfo=None, proxy=None) + hash_id_db_url, + cainfo=None, + proxy=None, + ) return result @mock.patch("logging.warning", return_value=None) @@ -524,17 +646,24 @@ # We shouldn't have any hash=>id database def callback(ignored): hash_id_db_filename = os.path.join( - self.config.data_path, "package", "hash-id", - "uuid_codename_arch") + self.config.data_path, + "package", + "hash-id", + "uuid_codename_arch", + ) self.assertEqual(os.path.exists(hash_id_db_filename), False) + result.addCallback(callback) logging_mock.assert_called_once_with( - "Can't determine the hash=>id database url") + "Can't determine the hash=>id database url", + ) return result - @mock.patch("landscape.client.package.reporter.fetch_async", - return_value=succeed(b"hash-ids")) + @mock.patch( + "landscape.client.package.reporter.fetch_async", + return_value=succeed(b"hash-ids"), + ) def test_fetch_hash_id_db_with_custom_certificate(self, mock_fetch_async): """ The L{PackageReporter.fetch_hash_id_db} method takes into account the @@ -551,13 +680,17 @@ self.facade.set_arch("arch") # Check fetch_async is called with the default url - hash_id_db_url = "http://fake.url/path/hash-id-databases/" \ - "uuid_codename_arch" + hash_id_db_url = ( + "http://fake.url/path/hash-id-databases/" "uuid_codename_arch" + ) # Now go! 
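# fetch_hash_id_db() is expected to derive the URL from config.url plus
# "<uuid>_<codename>_<arch>" and to pass config.ssl_public_key to
# fetch_async() as the cainfo argument, which the assertion below checks.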
result = self.reporter.fetch_hash_id_db() mock_fetch_async.assert_called_once_with( - hash_id_db_url, cainfo=self.config.ssl_public_key, proxy=None) + hash_id_db_url, + cainfo=self.config.ssl_public_key, + proxy=None, + ) return result @@ -583,8 +716,10 @@ """ self.reporter.sources_list_filename = "/I/Dont/Exist/At/All" self.reporter.sources_list_directory = self.makeDir() - self.makeFile(dirname=self.reporter.sources_list_directory, - content="deb http://foo ./") + self.makeFile( + dirname=self.reporter.sources_list_directory, + content="deb http://foo ./", + ) self.assertTrue(self.reporter._apt_sources_have_changed()) def test_remove_expired_hash_id_request(self): @@ -594,8 +729,11 @@ request.timestamp -= HASH_ID_REQUEST_TIMEOUT def got_result(result): - self.assertRaises(UnknownHashIDRequest, - self.store.get_hash_id_request, request.id) + self.assertRaises( + UnknownHashIDRequest, + self.store.get_hash_id_request, + request.id, + ) result = self.reporter.remove_expired_hash_id_requests() return result.addCallback(got_result) @@ -620,9 +758,9 @@ def test_remove_expired_hash_id_request_updates_timestamps(self): request = self.store.add_hash_id_request([b"hash1"]) message_store = self.broker_service.message_store - message_id = message_store.add({"type": "add-packages", - "packages": [], - "request-id": request.id}) + message_id = message_store.add( + {"type": "add-packages", "packages": [], "request-id": request.id}, + ) request.message_id = message_id initial_timestamp = request.timestamp @@ -636,8 +774,11 @@ request = self.store.add_hash_id_request([b"hash1"]) def got_result(result): - self.assertRaises(UnknownHashIDRequest, - self.store.get_hash_id_request, request.id) + self.assertRaises( + UnknownHashIDRequest, + self.store.get_hash_id_request, + request.id, + ) result = self.reporter.remove_expired_hash_id_requests() return result.addCallback(got_result) @@ -649,10 +790,16 @@ message_store.set_accepted_types(["unknown-package-hashes"]) def got_result(result): - self.assertMessages(message_store.get_pending_messages(), - [{"hashes": EqualsHashes(HASH1, HASH3), - "request-id": 1, - "type": "unknown-package-hashes"}]) + self.assertMessages( + message_store.get_pending_messages(), + [ + { + "hashes": EqualsHashes(HASH1, HASH3), + "request-id": 1, + "type": "unknown-package-hashes", + }, + ], + ) message = message_store.get_pending_messages()[0] @@ -668,8 +815,12 @@ message_store = self.broker_service.message_store message_store.set_accepted_types(["unknown-package-hashes"]) - self.addCleanup(setattr, reporter, "MAX_UNKNOWN_HASHES_PER_REQUEST", - reporter.MAX_UNKNOWN_HASHES_PER_REQUEST) + self.addCleanup( + setattr, + reporter, + "MAX_UNKNOWN_HASHES_PER_REQUEST", + reporter.MAX_UNKNOWN_HASHES_PER_REQUEST, + ) reporter.MAX_UNKNOWN_HASHES_PER_REQUEST = 2 @@ -707,10 +858,16 @@ self.store.add_hash_id_request([HASH1, HASH3]) def got_result(result): - self.assertMessages(message_store.get_pending_messages(), - [{"hashes": [HASH2], - "request-id": 2, - "type": "unknown-package-hashes"}]) + self.assertMessages( + message_store.get_pending_messages(), + [ + { + "hashes": [HASH2], + "request-id": 2, + "type": "unknown-package-hashes", + }, + ], + ) message = message_store.get_pending_messages()[0] @@ -754,7 +911,9 @@ self.assertEqual(list(self.store.iter_hash_id_requests()), []) with mock.patch.object( - self.reporter._broker, "send_message") as send_mock: + self.reporter._broker, + "send_message", + ) as send_mock: send_mock.return_value = deferred result = self.reporter.request_unknown_hashes() 
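# send_message was primed to fail with Boom, so trial's assertFailure()
# passes only if request_unknown_hashes() propagates that failure rather
# than swallowing it.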
self.assertFailure(result, Boom) @@ -773,8 +932,10 @@ self.assertFalse(os.path.exists(self.check_stamp_file)) def got_result(result): - self.assertMessages(message_store.get_pending_messages(), - [{"type": "packages", "available": [(1, 3)]}]) + self.assertMessages( + message_store.get_pending_messages(), + [{"type": "packages", "available": [(1, 3)]}], + ) self.assertTrue(os.path.exists(self.check_stamp_file)) result = self.reporter.detect_packages_changes() @@ -787,8 +948,10 @@ self.store.set_hash_ids({HASH1: 1, HASH2: 2, HASH3: 3}) def got_result(result): - self.assertMessages(message_store.get_pending_messages(), - [{"type": "packages", "available": [(1, 3)]}]) + self.assertMessages( + message_store.get_pending_messages(), + [{"type": "packages", "available": [(1, 3)]}], + ) self.assertEqual(sorted(self.store.get_available()), [1, 2, 3]) @@ -802,8 +965,10 @@ self.store.set_hash_ids({HASH1: 1, HASH3: 3}) def got_result(result): - self.assertMessages(message_store.get_pending_messages(), - [{"type": "packages", "available": [1, 3]}]) + self.assertMessages( + message_store.get_pending_messages(), + [{"type": "packages", "available": [1, 3]}], + ) self.assertEqual(sorted(self.store.get_available()), [1, 3]) @@ -818,8 +983,10 @@ self.store.add_available([1, 3]) def got_result(result): - self.assertMessages(message_store.get_pending_messages(), - [{"type": "packages", "available": [2]}]) + self.assertMessages( + message_store.get_pending_messages(), + [{"type": "packages", "available": [2]}], + ) self.assertEqual(sorted(self.store.get_available()), [1, 2, 3]) @@ -836,9 +1003,10 @@ self.store.add_available([1, 2, 3]) def got_result(result): - self.assertMessages(message_store.get_pending_messages(), - [{"type": "packages", - "not-available": [(1, 3)]}]) + self.assertMessages( + message_store.get_pending_messages(), + [{"type": "packages", "not-available": [(1, 3)]}], + ) self.assertEqual(self.store.get_available(), []) @@ -855,8 +1023,10 @@ self.set_pkg1_installed() def got_result(result): - self.assertMessages(message_store.get_pending_messages(), - [{"type": "packages", "installed": [1]}]) + self.assertMessages( + message_store.get_pending_messages(), + [{"type": "packages", "installed": [1]}], + ) self.assertEqual(self.store.get_installed(), [1]) @@ -890,8 +1060,10 @@ def got_result(result): self.assertTrue(result) - self.assertMessages(message_store.get_pending_messages(), - [{"type": "packages", "not-installed": [1]}]) + self.assertMessages( + message_store.get_pending_messages(), + [{"type": "packages", "not-installed": [1]}], + ) self.assertEqual(self.store.get_installed(), []) @@ -920,15 +1092,15 @@ self.set_pkg1_installed() self.facade.reload_channels() - self.store.set_hash_ids( - {HASH1: 1, upgrade_hash: 2, HASH3: 3}) + self.store.set_hash_ids({HASH1: 1, upgrade_hash: 2, HASH3: 3}) self.store.add_available([1, 2, 3]) self.store.add_installed([1]) def got_result(result): - self.assertMessages(message_store.get_pending_messages(), - [{"type": "packages", - "available-upgrades": [2]}]) + self.assertMessages( + message_store.get_pending_messages(), + [{"type": "packages", "available-upgrades": [2]}], + ) self.assertEqual(self.store.get_available_upgrades(), [2]) @@ -944,9 +1116,10 @@ self.store.add_available_upgrades([2]) def got_result(result): - self.assertMessages(message_store.get_pending_messages(), - [{"type": "packages", - "not-available-upgrades": [2]}]) + self.assertMessages( + message_store.get_pending_messages(), + [{"type": "packages", "not-available-upgrades": [2]}], + ) 
self.assertEqual(self.store.get_available_upgrades(), []) @@ -1014,11 +1187,16 @@ yield self.reporter.detect_packages_changes() - self.assertMessages(message_store.get_pending_messages(), [{ - "type": "packages", - "available": [(1, 3)], - "security": [(1, 3)], - }]) + self.assertMessages( + message_store.get_pending_messages(), + [ + { + "type": "packages", + "available": [(1, 3)], + "security": [(1, 3)], + }, + ], + ) self.assertEqual(sorted(self.store.get_available()), [1, 2, 3]) self.assertEqual(sorted(self.store.get_security()), [1, 2, 3]) @@ -1034,10 +1212,15 @@ yield self.reporter.detect_packages_changes() - self.assertMessages(message_store.get_pending_messages(), [{ - "type": "packages", - "not-security": [1, 2], - }]) + self.assertMessages( + message_store.get_pending_messages(), + [ + { + "type": "packages", + "not-security": [1, 2], + }, + ], + ) self.assertEqual(sorted(self.store.get_available()), [1, 2, 3]) self.assertEqual(self.store.get_security(), []) @@ -1082,8 +1265,10 @@ self.store.set_hash_ids({HASH1: 1, HASH2: 2, HASH3: 3}) def got_result(result): - self.assertMessages(message_store.get_pending_messages(), - [{"type": "packages", "available": [(1, 3)]}]) + self.assertMessages( + message_store.get_pending_messages(), + [{"type": "packages", "available": [(1, 3)]}], + ) self.assertEqual(sorted(self.store.get_available()), [1, 2, 3]) @@ -1119,8 +1304,10 @@ self.store.set_hash_ids({HASH1: 1, HASH2: 2, HASH3: 3}) def got_result(result): - self.assertMessages(message_store.get_pending_messages(), - [{"type": "packages", "available": [(1, 3)]}]) + self.assertMessages( + message_store.get_pending_messages(), + [{"type": "packages", "available": [(1, 3)]}], + ) self.assertEqual(sorted(self.store.get_available()), [1, 2, 3]) @@ -1140,17 +1327,20 @@ touch_file(self.check_stamp_file) - self.store.add_task("reporter", - {"type": "package-ids", "ids": [123, 456], - "request-id": 123}) + self.store.add_task( + "reporter", + {"type": "package-ids", "ids": [123, 456], "request-id": 123}, + ) yield self.reporter.handle_tasks() yield self.reporter.detect_packages_changes() # We check that detect changes run by looking at messages - self.assertMessages(message_store.get_pending_messages(), - [{"type": "packages", "available": [(1, 3)]}]) + self.assertMessages( + message_store.get_pending_messages(), + [{"type": "packages", "available": [(1, 3)]}], + ) def test_detect_packages_changes_with_not_locked_and_ranges(self): """ @@ -1166,8 +1356,10 @@ self.store.add_available([1, 2, 3]) def got_result(result): - self.assertMessages(message_store.get_pending_messages(), - [{"type": "packages", "not-locked": [(1, 3)]}]) + self.assertMessages( + message_store.get_pending_messages(), + [{"type": "packages", "not-locked": [(1, 3)]}], + ) self.assertEqual(sorted(self.store.get_locked()), []) result = self.reporter.detect_packages_changes() @@ -1177,8 +1369,11 @@ """ The L{PackageReporter.detect_changes} method package changes. 
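(It delegates to detect_packages_changes(); the patch below replaces that
method to confirm it is called exactly once.)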
""" - with mock.patch.object(self.reporter, "detect_packages_changes", - return_value=succeed(True)) as reporter_mock: + with mock.patch.object( + self.reporter, + "detect_packages_changes", + return_value=succeed(True), + ) as reporter_mock: self.successResultOf(self.reporter.detect_changes()) reporter_mock.assert_called_once_with() @@ -1190,8 +1385,11 @@ """ callback = mock.Mock() self.broker_service.reactor.call_on("package-data-changed", callback) - with mock.patch.object(self.reporter, "detect_packages_changes", - return_value=succeed(True)) as reporter_mock: + with mock.patch.object( + self.reporter, + "detect_packages_changes", + return_value=succeed(True), + ) as reporter_mock: self.successResultOf(self.reporter.detect_changes()) reporter_mock.assert_called_once_with() callback.assert_called_once_with() @@ -1203,9 +1401,11 @@ self.reporter.use_hash_id_db = mock.Mock(return_value=results[2]) self.reporter.handle_tasks = mock.Mock(return_value=results[3]) self.reporter.remove_expired_hash_id_requests = mock.Mock( - return_value=results[4]) + return_value=results[4], + ) self.reporter.request_unknown_hashes = mock.Mock( - return_value=results[5]) + return_value=results[5], + ) self.reporter.detect_changes = mock.Mock(return_value=results[6]) self.reporter.run() @@ -1239,7 +1439,10 @@ them as utf-8 (LP: #1827857). """ self._add_package_to_deb_dir( - self.repository_dir, "gosa", description=u"GOsa\u00B2") + self.repository_dir, + "gosa", + description="GOsa\u00B2", + ) self.facade.reload_channels() # Set the only non-utf8 locale which we're sure exists. @@ -1255,7 +1458,7 @@ # description translation. pkg = self.facade.get_packages_by_name("gosa")[0] skel = self.facade.get_package_skeleton(pkg, with_info=True) - self.assertEqual(u"GOsa\u00B2", skel.description) + self.assertEqual("GOsa\u00B2", skel.description) def test_find_reporter_command_with_bindir(self): self.config.bindir = "/spam/eggs" @@ -1266,7 +1469,8 @@ def test_find_reporter_command_default(self): expected = os.path.join( os.path.dirname(os.path.abspath(sys.argv[0])), - "landscape-package-reporter") + "landscape-package-reporter", + ) command = find_reporter_command() self.assertEqual(expected, command) @@ -1293,7 +1497,9 @@ message_store.set_server_uuid("uuid") self.facade.set_arch("arch") hash_id_file = os.path.join( - self.config.hash_id_directory, "uuid_codename_arch") + self.config.hash_id_directory, + "uuid_codename_arch", + ) os.makedirs(self.config.hash_id_directory) with open(hash_id_file, "w"): pass @@ -1315,8 +1521,10 @@ self.assertEqual(self.store.get_available_upgrades(), [2]) self.assertEqual(self.store.get_available(), [1]) self.assertEqual(self.store.get_installed(), [2]) - self.assertEqual(self.store.get_hash_id_request(request1.id).id, - request1.id) + self.assertEqual( + self.store.get_hash_id_request(request1.id).id, + request1.id, + ) self.store.add_task("reporter", {"type": "resynchronize"}) @@ -1324,7 +1532,7 @@ self.reactor.advance(0) with mock.patch( "landscape.client.package.taskhandler.parse_lsb_release", - side_effect=lambda _: {"code-name": "codename"} + side_effect=lambda _: {"code-name": "codename"}, ): yield deferred @@ -1372,13 +1580,18 @@ self.assertEqual("error", err) self.assertEqual(0, code) self.assertFalse(warning_mock.called) - debug_mock.assert_has_calls([ - mock.call( - "Checking if ubuntu-release-upgrader is running."), - mock.call( - "'%s' exited with status 0 (out='output', err='error')" - % self.reporter.apt_update_filename) - ]) + debug_mock.assert_has_calls( + [ + mock.call( + 
"Checking if ubuntu-release-upgrader is running.", + ), + mock.call( + f"'{self.reporter.apt_update_filename}' exited" + " with status 0 (out='output', err='error')", + ), + ], + ) + result.addCallback(callback) self.reactor.advance(0) result.chainDeferred(deferred) @@ -1411,8 +1624,10 @@ sources.list file has changed. """ - self.assertEqual(self.reporter.sources_list_filename, - "/etc/apt/sources.list") + self.assertEqual( + self.reporter.sources_list_filename, + "/etc/apt/sources.list", + ) self.reporter.sources_list_filename = self.makeFile("deb ftp://url ./") self._make_fake_apt_update() @@ -1444,8 +1659,9 @@ self.assertEqual("error", err) self.assertEqual(2, code) warning_mock.assert_called_once_with( - "'%s' exited with status 2 (error)" % - self.reporter.apt_update_filename) + f"'{self.reporter.apt_update_filename}' " + "exited with status 2 (error)", + ) result.addCallback(callback) self.reactor.advance(0) @@ -1464,9 +1680,11 @@ "spawn_process", side_effect=[ # Simulate series of failures to acquire the apt lock. - succeed((b'', b'', 100)), - succeed((b'', b'', 100)), - succeed((b'', b'', 100))]) + succeed((b"", b"", 100)), + succeed((b"", b"", 100)), + succeed((b"", b"", 100)), + ], + ) spawn_patcher.start() self.addCleanup(spawn_patcher.stop) @@ -1481,10 +1699,15 @@ result.addCallback(callback) self.reactor.advance(60) message = "Could not acquire the apt lock. Retrying in {} seconds." - calls = [mock.call(message.format(20)), - mock.call(message.format(40)), - mock.call("'{}' exited with status 1000 ()".format( - self.reporter.apt_update_filename))] + calls = [ + mock.call(message.format(20)), + mock.call(message.format(40)), + mock.call( + "'{}' exited with status 1000 ()".format( + self.reporter.apt_update_filename, + ), + ), + ] logging_mock.has_calls(calls) return result @@ -1504,8 +1727,10 @@ "spawn_process", side_effect=[ # Simulate a failed apt lock grab then a successful one. - succeed((b'', b'', 100)), - succeed((b'output', b'error', 0))]) + succeed((b"", b"", 100)), + succeed((b"output", b"error", 0)), + ], + ) spawn_patcher.start() self.addCleanup(spawn_patcher.stop) @@ -1517,7 +1742,8 @@ self.assertEqual("error", err) self.assertEqual(0, code) warning_mock.assert_called_once_with( - "Could not acquire the apt lock. Retrying in 20 seconds.") + "Could not acquire the apt lock. Retrying in 20 seconds.", + ) result.addCallback(callback) self.reactor.advance(20) @@ -1540,8 +1766,16 @@ def callback(ignore): self.assertMessages( message_store.get_pending_messages(), - [{"type": "package-reporter-result", - "report-timestamp": 10.0, "code": 0, "err": u""}]) + [ + { + "type": "package-reporter-result", + "report-timestamp": 10.0, + "code": 0, + "err": "", + }, + ], + ) + result.addCallback(callback) self.reactor.advance(0) result.chainDeferred(deferred) @@ -1565,8 +1799,16 @@ def callback(ignore): self.assertMessages( message_store.get_pending_messages(), - [{"type": "package-reporter-result", - "report-timestamp": 0.0, "code": 2, "err": u"error"}]) + [ + { + "type": "package-reporter-result", + "report-timestamp": 0.0, + "code": 2, + "err": "error", + }, + ], + ) + result.addCallback(callback) self.reactor.advance(0) result.chainDeferred(deferred) @@ -1591,13 +1833,24 @@ result = self.reporter.run_apt_update() def callback(ignore): - error = "There are no APT sources configured in %s or %s." 
% ( - self.reporter.sources_list_filename, - self.reporter.sources_list_directory) + error = ( + "There are no APT sources configured in {} or {}.".format( + self.reporter.sources_list_filename, + self.reporter.sources_list_directory, + ) + ) self.assertMessages( message_store.get_pending_messages(), - [{"type": "package-reporter-result", - "report-timestamp": 0.0, "code": 1, "err": error}]) + [ + { + "type": "package-reporter-result", + "report-timestamp": 0.0, + "code": 1, + "err": error, + }, + ], + ) + result.addCallback(callback) self.reactor.advance(0) result.chainDeferred(deferred) @@ -1622,8 +1875,16 @@ def callback(ignore): self.assertMessages( message_store.get_pending_messages(), - [{"type": "package-reporter-result", - "report-timestamp": 0.0, "code": 2, "err": u"error"}]) + [ + { + "type": "package-reporter-result", + "report-timestamp": 0.0, + "code": 2, + "err": "error", + }, + ], + ) + result.addCallback(callback) self.reactor.advance(0) result.chainDeferred(deferred) @@ -1647,8 +1908,16 @@ def callback(ignore): self.assertMessages( message_store.get_pending_messages(), - [{"type": "package-reporter-result", - "report-timestamp": 0.0, "code": 0, "err": u"message"}]) + [ + { + "type": "package-reporter-result", + "report-timestamp": 0.0, + "code": 0, + "err": "message", + }, + ], + ) + result.addCallback(callback) self.reactor.advance(0) result.chainDeferred(deferred) @@ -1679,8 +1948,10 @@ self.assertEqual("", err) self.assertEqual(0, code) debug_mock.assert_called_once_with( - ("'%s' didn't run, conditions not met" - ) % self.reporter.apt_update_filename) + f"'{self.reporter.apt_update_filename}' " + "didn't run, conditions not met", + ) + result.addCallback(callback) self.reactor.advance(0) result.chainDeferred(deferred) @@ -1700,7 +1971,9 @@ self.makeFile("", path=self.config.update_stamp_filename) expired_time = time.time() - self.config.apt_update_interval - 1 os.utime( - self.config.update_stamp_filename, (expired_time, expired_time)) + self.config.update_stamp_filename, + (expired_time, expired_time), + ) # The interval for the update-notifier-common stamp file is not # expired. self.reporter.update_notifier_stamp = self.makeFile("") @@ -1720,8 +1993,10 @@ self.assertEqual("", err) self.assertEqual(0, code) debug_mock.assert_called_once_with( - ("'%s' didn't run, conditions not met" - ) % self.reporter.apt_update_filename) + f"'{self.reporter.apt_update_filename}' " + "didn't run, conditions not met", + ) + result.addCallback(callback) self.reactor.advance(0) result.chainDeferred(deferred) @@ -1741,10 +2016,14 @@ # The interval for both stamp files is expired. 
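# Backdating both stamps with os.utime() past apt_update_interval makes
# run_apt_update() treat the cache as stale, so the fake apt-update
# script really runs this time.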
self.makeFile("", path=self.config.update_stamp_filename) os.utime( - self.config.update_stamp_filename, (expired_time, expired_time)) + self.config.update_stamp_filename, + (expired_time, expired_time), + ) self.reporter.update_notifier_stamp = self.makeFile("") os.utime( - self.reporter.update_notifier_stamp, (expired_time, expired_time)) + self.reporter.update_notifier_stamp, + (expired_time, expired_time), + ) message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) @@ -1757,8 +2036,16 @@ def callback(ignore): self.assertMessages( message_store.get_pending_messages(), - [{"type": "package-reporter-result", - "report-timestamp": 0.0, "code": 0, "err": u"message"}]) + [ + { + "type": "package-reporter-result", + "report-timestamp": 0.0, + "code": 0, + "err": "message", + }, + ], + ) + result.addCallback(callback) self.reactor.advance(0) result.chainDeferred(deferred) @@ -1780,7 +2067,9 @@ def callback(ignored): self.assertTrue( - os.path.exists(self.config.update_stamp_filename)) + os.path.exists(self.config.update_stamp_filename), + ) + result.addCallback(callback) self.reactor.advance(0) result.chainDeferred(deferred) @@ -1788,8 +2077,10 @@ reactor.callWhenRunning(do_test) return deferred - @mock.patch("landscape.client.package.reporter.spawn_process", - return_value=succeed((b"", b"", 0))) + @mock.patch( + "landscape.client.package.reporter.spawn_process", + return_value=succeed((b"", b"", 0)), + ) def test_run_apt_update_honors_http_proxy(self, mock_spawn_process): """ The PackageReporter.run_apt_update method honors the http_proxy @@ -1805,10 +2096,13 @@ mock_spawn_process.assert_called_once_with( self.reporter.apt_update_filename, - env={"http_proxy": "http://proxy_server:8080"}) + env={"http_proxy": "http://proxy_server:8080"}, + ) - @mock.patch("landscape.client.package.reporter.spawn_process", - return_value=succeed((b"", b"", 0))) + @mock.patch( + "landscape.client.package.reporter.spawn_process", + return_value=succeed((b"", b"", 0)), + ) def test_run_apt_update_honors_https_proxy(self, mock_spawn_process): """ The PackageReporter.run_apt_update method honors the https_proxy @@ -1824,7 +2118,8 @@ mock_spawn_process.assert_called_once_with( self.reporter.apt_update_filename, - env={"https_proxy": "http://proxy_server:8443"}) + env={"https_proxy": "http://proxy_server:8443"}, + ) def test_run_apt_update_error_on_cache_file(self): """ @@ -1836,13 +2131,17 @@ message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) self._make_fake_apt_update( - code=2, out="not important", - err=("E: Problem renaming the file " - "/var/cache/apt/pkgcache.bin.6ZsRSX to " - "/var/cache/apt/pkgcache.bin - rename (2: No such file " - "or directory)\n" - "W: You may want to run apt-get update to correct these " - "problems")) + code=2, + out="not important", + err=( + "E: Problem renaming the file " + "/var/cache/apt/pkgcache.bin.6ZsRSX to " + "/var/cache/apt/pkgcache.bin - rename (2: No such file " + "or directory)\n" + "W: You may want to run apt-get update to correct these " + "problems" + ), + ) deferred = Deferred() def do_test(): @@ -1851,8 +2150,16 @@ def callback(ignore): self.assertMessages( message_store.get_pending_messages(), - [{"type": "package-reporter-result", - "report-timestamp": 0.0, "code": 0, "err": u""}]) + [ + { + "type": "package-reporter-result", + "report-timestamp": 0.0, + "code": 0, + "err": "", + }, + ], + ) + result.addCallback(callback) self.reactor.advance(0) 
result.chainDeferred(deferred) @@ -1868,17 +2175,21 @@ message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) self._make_fake_apt_update( - code=2, out="not important", - err=("E: Problem renaming the file " - "/var/cache/apt/srcpkgcache.bin.Pw1Zxy to " - "/var/cache/apt/srcpkgcache.bin - rename (2: No such file " - "or directory)\n" - "E: Problem renaming the file " - "/var/cache/apt/pkgcache.bin.wz8ooS to " - "/var/cache/apt/pkgcache.bin - rename (2: No such file " - "or directory)\n" - "E: The package lists or status file could not be parsed " - "or opened.")) + code=2, + out="not important", + err=( + "E: Problem renaming the file " + "/var/cache/apt/srcpkgcache.bin.Pw1Zxy to " + "/var/cache/apt/srcpkgcache.bin - rename (2: No such file " + "or directory)\n" + "E: Problem renaming the file " + "/var/cache/apt/pkgcache.bin.wz8ooS to " + "/var/cache/apt/pkgcache.bin - rename (2: No such file " + "or directory)\n" + "E: The package lists or status file could not be parsed " + "or opened." + ), + ) deferred = Deferred() @@ -1888,8 +2199,16 @@ def callback(ignore): self.assertMessages( message_store.get_pending_messages(), - [{"type": "package-reporter-result", - "report-timestamp": 0.0, "code": 0, "err": u""}]) + [ + { + "type": "package-reporter-result", + "report-timestamp": 0.0, + "code": 0, + "err": "", + }, + ], + ) + result.addCallback(callback) self.reactor.advance(0) result.chainDeferred(deferred) @@ -1922,6 +2241,7 @@ def callback(ignore): self.assertMessages(message_store.get_pending_messages(), []) self.assertEqual([1234], intervals) + result.addCallback(callback) result.chainDeferred(deferred) @@ -1996,12 +2316,17 @@ # no 'release upgrader running' self.assertFalse(self.reporter._is_release_upgrader_running()) # fake 'release ugrader' running with non-root UID - p = subprocess.Popen([reporter.PYTHON_BIN, '-c', - 'import time; time.sleep(10)', - reporter.RELEASE_UPGRADER_PATTERN + "12345"]) + p = subprocess.Popen( + [ + reporter.PYTHON_BIN, + "-c", + "import time; time.sleep(10)", + reporter.RELEASE_UPGRADER_PATTERN + "12345", + ], + ) self.assertFalse(self.reporter._is_release_upgrader_running()) # fake 'release upgrader' running - reporter.UID_ROOT = "%d" % os.getuid() + reporter.UID_ROOT = f"{os.getuid():d}" self.assertTrue(self.reporter._is_release_upgrader_running()) p.terminate() @@ -2011,12 +2336,17 @@ helpers = [AptFacadeHelper, SimpleRepositoryHelper, BrokerServiceHelper] def setUp(self): - super(GlobalPackageReporterAptTest, self).setUp() + super().setUp() self.store = FakePackageStore(self.makeFile()) self.config = PackageReporterConfiguration() self.reactor = FakeReactor() self.reporter = FakeGlobalReporter( - self.store, self.facade, self.remote, self.config, self.reactor) + self.store, + self.facade, + self.remote, + self.config, + self.reactor, + ) # Assume update-notifier-common stamp file is not present by # default. 
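# (A path that cannot exist keeps that stamp from ever satisfying the
# run conditions, even on hosts that ship the real file.)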
self.reporter.update_notifier_stamp = "/Not/Existing" @@ -2030,7 +2360,8 @@ message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) self.reporter.apt_update_filename = self.makeFile( - "#!/bin/sh\necho -n error >&2\necho -n output\nexit 0") + "#!/bin/sh\necho -n error >&2\necho -n output\nexit 0", + ) os.chmod(self.reporter.apt_update_filename, 0o755) deferred = Deferred() @@ -2040,15 +2371,25 @@ self.reactor.advance(0) def callback(ignore): - message = {"type": "package-reporter-result", - "report-timestamp": 0.0, "code": 0, "err": u"error"} + message = { + "type": "package-reporter-result", + "report-timestamp": 0.0, + "code": 0, + "err": "error", + } self.assertMessages( - message_store.get_pending_messages(), [message]) - stored = list(self.store._db.execute( - "SELECT id, data FROM message").fetchall()) + message_store.get_pending_messages(), + [message], + ) + stored = list( + self.store._db.execute( + "SELECT id, data FROM message", + ).fetchall(), + ) self.assertEqual(1, len(stored)) self.assertEqual(1, stored[0][0]) self.assertEqual(message, bpickle.loads(bytes(stored[0][1]))) + result.addCallback(callback) result.chainDeferred(deferred) @@ -2061,7 +2402,7 @@ helpers = [EnvironSaverHelper, BrokerServiceHelper] def setUp(self): - super(FakePackageReporterTest, self).setUp() + super().setUp() self.store = FakePackageStore(self.makeFile()) global_file = self.makeFile() self.global_store = FakePackageStore(global_file) @@ -2069,7 +2410,12 @@ self.config = PackageReporterConfiguration() self.reactor = FakeReactor() self.reporter = FakeReporter( - self.store, None, self.remote, self.config, self.reactor) + self.store, + None, + self.remote, + self.config, + self.reactor, + ) self.config.data_path = self.makeDir() os.mkdir(self.config.package_directory) @@ -2080,16 +2426,19 @@ """ message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) - message = {"type": "package-reporter-result", - "code": 0, "err": u"error"} + message = { + "type": "package-reporter-result", + "code": 0, + "err": "error", + } self.global_store.save_message(message) def check(ignore): messages = message_store.get_pending_messages() - self.assertMessages( - messages, [message]) - stored = list(self.store._db.execute( - "SELECT id FROM message").fetchall()) + self.assertMessages(messages, [message]) + stored = list( + self.store._db.execute("SELECT id FROM message").fetchall(), + ) self.assertEqual(1, len(stored)) self.assertEqual(1, stored[0][0]) @@ -2103,26 +2452,38 @@ """ message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) - message1 = {"type": "package-reporter-result", - "code": 0, "err": u"error"} + message1 = { + "type": "package-reporter-result", + "code": 0, + "err": "error", + } self.global_store.save_message(message1) - message2 = {"type": "package-reporter-result", - "code": 1, "err": u"error"} + message2 = { + "type": "package-reporter-result", + "code": 1, + "err": "error", + } self.global_store.save_message(message2) def check1(ignore): self.assertMessages( - message_store.get_pending_messages(), [message1]) - stored = list(self.store._db.execute( - "SELECT id FROM message").fetchall()) + message_store.get_pending_messages(), + [message1], + ) + stored = list( + self.store._db.execute("SELECT id FROM message").fetchall(), + ) self.assertEqual(1, stored[0][0]) return self.reporter.run().addCallback(check2) def check2(ignore): 
self.assertMessages( - message_store.get_pending_messages(), [message1, message2]) - stored = list(self.store._db.execute( - "SELECT id FROM message").fetchall()) + message_store.get_pending_messages(), + [message1, message2], + ) + stored = list( + self.store._db.execute("SELECT id FROM message").fetchall(), + ) self.assertEqual(2, len(stored)) self.assertEqual(1, stored[0][0]) self.assertEqual(2, stored[1][0]) @@ -2130,8 +2491,7 @@ return self.reporter.run().addCallback(check1) -class EqualsHashes(object): - +class EqualsHashes: def __init__(self, *hashes): self._hashes = sorted(hashes) diff -Nru landscape-client-23.02/landscape/client/package/tests/test_taskhandler.py landscape-client-23.08/landscape/client/package/tests/test_taskhandler.py --- landscape-client-23.02/landscape/client/package/tests/test_taskhandler.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/package/tests/test_taskhandler.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,27 +1,35 @@ import os from subprocess import CalledProcessError +from unittest.mock import ANY +from unittest.mock import Mock +from unittest.mock import patch + +from twisted.internet.defer import Deferred +from twisted.internet.defer import fail +from twisted.internet.defer import succeed -from mock import patch, Mock, ANY - -from twisted.internet.defer import Deferred, fail, succeed - +from landscape.client.broker.amp import RemoteBrokerConnector +from landscape.client.package.taskhandler import LazyRemoteBroker +from landscape.client.package.taskhandler import PackageTaskHandler +from landscape.client.package.taskhandler import ( + PackageTaskHandlerConfiguration, +) +from landscape.client.package.taskhandler import run_task_handler +from landscape.client.tests.helpers import BrokerServiceHelper +from landscape.client.tests.helpers import LandscapeTest from landscape.lib.apt.package.facade import AptFacade -from landscape.lib.apt.package.store import HashIdStore, PackageStore +from landscape.lib.apt.package.store import HashIdStore +from landscape.lib.apt.package.store import PackageStore from landscape.lib.apt.package.testing import AptFacadeHelper from landscape.lib.lock import lock_path -from landscape.lib.testing import EnvironSaverHelper, FakeReactor -from landscape.client.broker.amp import RemoteBrokerConnector -from landscape.client.package.taskhandler import ( - PackageTaskHandlerConfiguration, PackageTaskHandler, run_task_handler, - LazyRemoteBroker) -from landscape.client.tests.helpers import LandscapeTest, BrokerServiceHelper +from landscape.lib.testing import EnvironSaverHelper +from landscape.lib.testing import FakeReactor SAMPLE_LSB_RELEASE = "DISTRIB_CODENAME=codename\n" class PackageTaskHandlerConfigurationTest(LandscapeTest): - def test_update_stamp_option(self): """ L{PackageReporterConfiguration.update_stamp_filename} points @@ -30,7 +38,8 @@ config = PackageTaskHandlerConfiguration() self.assertEqual( config.update_stamp_filename, - "/var/lib/landscape/client/package/update-stamp") + "/var/lib/landscape/client/package/update-stamp", + ) class PackageTaskHandlerTest(LandscapeTest): @@ -38,12 +47,17 @@ helpers = [AptFacadeHelper, EnvironSaverHelper, BrokerServiceHelper] def setUp(self): - super(PackageTaskHandlerTest, self).setUp() + super().setUp() self.config = PackageTaskHandlerConfiguration() self.store = PackageStore(self.makeFile()) self.reactor = FakeReactor() self.handler = PackageTaskHandler( - self.store, self.facade, self.remote, self.config, self.reactor) + self.store, + self.facade, + 
self.remote, + self.config, + self.reactor, + ) def test_use_hash_id_db(self): @@ -53,8 +67,12 @@ # An appropriate hash=>id database is available self.config.data_path = self.makeDir() os.makedirs(os.path.join(self.config.data_path, "package", "hash-id")) - hash_id_db_filename = os.path.join(self.config.data_path, "package", - "hash-id", "uuid_codename_arch") + hash_id_db_filename = os.path.join( + self.config.data_path, + "package", + "hash-id", + "uuid_codename_arch", + ) HashIdStore(hash_id_db_filename).set_hash_ids({b"hash": 123}) # Fake uuid, codename and arch @@ -69,6 +87,7 @@ # Now we do have the hash=>id mapping def callback(ignored): self.assertEqual(self.store.get_hash_id(b"hash"), 123) + result.addCallback(callback) return result @@ -89,7 +108,8 @@ # The failure should be properly logged logging_mock.assert_called_with( "Couldn't determine which hash=>id database to use: " - "missing code-name key in %s" % self.handler.lsb_release_filename) + f"missing code-name key in {self.handler.lsb_release_filename}", + ) return result @@ -111,8 +131,9 @@ # The failure should be properly logged logging_mock.assert_called_with( "Couldn't determine which hash=>id database to use: " - "[Errno 2] No such file or directory: '%s'" % - self.handler.lsb_release_filename) + "[Errno 2] No such file or directory: " + f"'{self.handler.lsb_release_filename}'", + ) return result @@ -128,6 +149,7 @@ def callback(hash_id_db_filename): self.assertIs(hash_id_db_filename, None) + result.addCallback(callback) return result @@ -144,10 +166,12 @@ logging_mock.assert_called_with( "Couldn't determine which hash=>id database to use: " - "server UUID not available") + "server UUID not available", + ) def callback(ignore): self.assertFalse(self.store.has_hash_id_db()) + result.addCallback(callback) return result @@ -156,7 +180,7 @@ L{get_session_id} returns a session ID. 
""" - def assertHaveSessionId(session_id): + def assertHaveSessionId(session_id): # noqa: N802 self.assertTrue(session_id is not None) result = self.handler.get_session_id() @@ -180,7 +204,8 @@ # The failure should be properly logged logging_mock.assert_called_with( "Couldn't determine which hash=>id database to use: " - "unknown dpkg architecture") + "unknown dpkg architecture", + ) return result @@ -201,6 +226,7 @@ # We go on without the hash=>id database def callback(ignored): self.assertFalse(self.store.has_hash_id_db()) + result.addCallback(callback) return result @@ -211,8 +237,12 @@ # Let's say the appropriate database is actually garbage self.config.data_path = self.makeDir() os.makedirs(os.path.join(self.config.data_path, "package", "hash-id")) - hash_id_db_filename = os.path.join(self.config.data_path, "package", - "hash-id", "uuid_codename_arch") + hash_id_db_filename = os.path.join( + self.config.data_path, + "package", + "hash-id", + "uuid_codename_arch", + ) open(hash_id_db_filename, "w").write("junk") # Fake uuid, codename and arch @@ -226,12 +256,14 @@ # The failure should be properly logged logging_mock.assert_called_with( - "Invalid hash=>id database %s" % hash_id_db_filename) + f"Invalid hash=>id database {hash_id_db_filename}", + ) # We remove the broken hash=>id database and go on without it def callback(ignored): self.assertFalse(os.path.exists(hash_id_db_filename)) self.assertFalse(self.store.has_hash_id_db()) + result.addCallback(callback) return result @@ -331,8 +363,14 @@ @patch("landscape.client.package.taskhandler.LandscapeReactor") @patch("landscape.client.package.taskhandler.init_logging") @patch("landscape.client.package.taskhandler.lock_path") - def test_run_task_handler(self, lock_path_mock, init_logging_mock, - reactor_class_mock, connector_class_mock, umask): + def test_run_task_handler( + self, + lock_path_mock, + init_logging_mock, + reactor_class_mock, + connector_class_mock, + umask, + ): """ The L{run_task_handler} function creates and runs the given task handler with the proper arguments. @@ -350,10 +388,9 @@ handler_args = [] class HandlerMock(PackageTaskHandler): - def __init__(self, *args): handler_args.extend(args) - super(HandlerMock, self).__init__(*args) + super().__init__(*args) call_when_running = [] reactor_mock = Mock(name="mock-reactor") @@ -379,8 +416,11 @@ self.assertEqual(other_store.get_available(), [1, 2, 3]) # Check the hash=>id database directory as well - self.assertTrue(os.path.exists( - os.path.join(self.data_path, "package", "hash-id"))) + self.assertTrue( + os.path.exists( + os.path.join(self.data_path, "package", "hash-id"), + ), + ) result = run_task_handler(HandlerMock, ["-c", self.config_filename]) @@ -388,7 +428,8 @@ # never have two instances running in parallel. The 'default' # below comes from the queue_name attribute. lock_path_mock.assert_called_once_with( - os.path.join(self.data_path, "package", "default.lock")) + os.path.join(self.data_path, "package", "default.lock"), + ) # Once locking is done, it's safe to start logging without # corrupting the file. 
We don't want any output unless it's @@ -419,8 +460,10 @@ lock_path(os.path.join(self.data_path, "package", "default.lock")) try: - run_task_handler(PackageTaskHandler, - ["-c", self.config_filename, "--quiet"]) + run_task_handler( + PackageTaskHandler, + ["-c", self.config_filename, "--quiet"], + ) except SystemExit as e: self.assertEqual(str(e), "") else: @@ -428,14 +471,12 @@ @patch("landscape.client.package.taskhandler.init_logging") def test_errors_are_printed_and_exit_program(self, init_logging_mock): - class MyException(Exception): pass self.log_helper.ignore_errors(MyException) class HandlerMock(PackageTaskHandler): - def run(self): return fail(MyException("Hey error")) @@ -445,9 +486,11 @@ self.assertIn("MyException", self.logfile.getvalue()) init_logging_mock.assert_called_once_with(ANY, "handler-mock") - result = run_task_handler(HandlerMock, - ["-c", self.config_filename], - reactor=FakeReactor()) + result = run_task_handler( + HandlerMock, + ["-c", self.config_filename], + reactor=FakeReactor(), + ) return result.addCallback(assert_log) diff -Nru landscape-client-23.02/landscape/client/patch.py landscape-client-23.08/landscape/client/patch.py --- landscape-client-23.02/landscape/client/patch.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/patch.py 2023-08-17 21:19:32.000000000 +0000 @@ -5,7 +5,7 @@ """Two upgraders with the same version have been registered.""" -class UpgradeManagerBase(object): +class UpgradeManagerBase: """A simple upgrade system.""" def __init__(self): @@ -22,8 +22,9 @@ """ if version in self._upgraders: raise UpgraderConflict( - "%s is already registered as %s; not adding %s" % - (version, self._upgraders[version], function)) + f"{version} is already registered as " + f"{self._upgraders[version]}; not adding {function}", + ) self._upgraders[version] = function def get_version(self): @@ -44,14 +45,15 @@ @param version: The version number that the function will be upgrading to. """ + def inner(function): self.register_upgrader(version, function) return function + return inner class UpgradeManager(UpgradeManagerBase): - def apply(self, persist): """Bring the database up-to-date. @@ -64,7 +66,7 @@ if version > persist.get("system-version"): persist.set("system-version", version) upgrader(persist) - logging.info("Successfully applied patch %s" % version) + logging.info(f"Successfully applied patch {version}") def initialize(self, persist): """ @@ -80,7 +82,7 @@ def get_database_versions(self, cursor): cursor.execute("SELECT version FROM patch") result = cursor.fetchall() - return set([row[0] for row in result]) + return {row[0] for row in result} def get_database_version(self, cursor): cursor.execute("SELECT MAX(version) FROM patch") diff -Nru landscape-client-23.02/landscape/client/reactor.py landscape-client-23.08/landscape/client/reactor.py --- landscape-client-23.02/landscape/client/reactor.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/reactor.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,8 @@ """ Extend the regular Twisted reactor with event-handling features. 
""" -from landscape.lib.reactor import EventHandlingReactor from landscape.client.lockfile import patch_lockfile +from landscape.lib.reactor import EventHandlingReactor patch_lockfile() diff -Nru landscape-client-23.02/landscape/client/serviceconfig.py landscape-client-23.08/landscape/client/serviceconfig.py --- landscape-client-23.02/landscape/client/serviceconfig.py 1970-01-01 00:00:00.000000000 +0000 +++ landscape-client-23.08/landscape/client/serviceconfig.py 2023-08-17 21:19:32.000000000 +0000 @@ -0,0 +1,130 @@ +"""Interface for interacting with service managers managing landscape-client. + +This module exports a single class, `ServiceConfig`, which is aliased to a +concrete implementation based on configuration. Concrete implementations +provide the following interface: + +set_start_on_boot(flag: bool) -> None +restart_landscape() -> None +stop_landscape() -> None +is_configured_to_run() -> bool + +Methods should raise `ServiceConfigException` if necessary. +""" +import os +import subprocess + +SERVICE_NAME = "landscape-client" +SYSTEMD_SERVICE = f"{SERVICE_NAME}.service" + +SNAPCTL = "/usr/bin/snapctl" +SYSTEMCTL = "/usr/bin/systemctl" + + +class ServiceConfigException(Exception): + """ + Error modifying the landscape-client service's status or configuration. + """ + + +class SystemdConfig: + """ + A collection of methods for driving the landscape-client systemd service. + """ + + @staticmethod + def set_start_on_boot(flag: bool) -> None: + action = "enable" if flag else "disable" + SystemdConfig._call_systemctl(action) + + @staticmethod + def restart_landscape() -> None: + try: + SystemdConfig._call_systemctl("restart", check=True) + except subprocess.CalledProcessError: + raise ServiceConfigException(f"Could not restart {SERVICE_NAME}") + + @staticmethod + def stop_landscape() -> None: + try: + SystemdConfig._call_systemctl("stop", check=True) + except subprocess.CalledProcessError: + raise ServiceConfigException(f"Could not stop {SERVICE_NAME}") + + @staticmethod + def is_configured_to_run() -> bool: + completed_process = SystemdConfig._call_systemctl("is-enabled") + return completed_process.returncode == 0 + + @staticmethod + def _call_systemctl( + action: str, + **run_kwargs, + ) -> subprocess.CompletedProcess: + """Calls systemctl, passing `action` and `run_kwargs`, while consuming + stdout. + """ + return subprocess.run( + [SYSTEMCTL, action, SYSTEMD_SERVICE, "--quiet"], + stdout=subprocess.PIPE, + **run_kwargs, + ) + + +class SnapdConfig: + """ + A collection of methods for driving the landscape-client snapd service. 
+ """ + + @staticmethod + def set_start_on_boot(flag: bool) -> None: + if flag: + cmd = "start" + param = "--enable" + else: + cmd = "stop" + param = "--disable" + + SnapdConfig._call_snapctl(cmd, param) + + @staticmethod + def restart_landscape() -> None: + try: + SnapdConfig._call_snapctl("restart", check=True) + except subprocess.CalledProcessError: + raise ServiceConfigException(f"Could not restart {SERVICE_NAME}") + + @staticmethod + def stop_landscape() -> None: + try: + SnapdConfig._call_snapctl("stop", check=True) + except subprocess.CalledProcessError: + raise ServiceConfigException(f"Could not stop {SERVICE_NAME}") + + @staticmethod + def is_configured_to_run() -> bool: + completed_process = SnapdConfig._call_snapctl("services") + stdout = completed_process.stdout + + return stdout and "enabled" in stdout + + @staticmethod + def _call_snapctl( + action: str, + *cmd_args, + **run_kwargs, + ) -> subprocess.CompletedProcess: + """Calls snapctl, passing `action`, `cmd_args`, and `run_kwargs`, while + consuming stdout. + """ + return subprocess.run( + [SNAPCTL, action, SERVICE_NAME, *cmd_args], + stdout=subprocess.PIPE, + **run_kwargs, + ) + + +if os.environ.get("SNAP_REVISION"): + ServiceConfig = SnapdConfig +else: + ServiceConfig = SystemdConfig diff -Nru landscape-client-23.02/landscape/client/service.py landscape-client-23.08/landscape/client/service.py --- landscape-client-23.02/landscape/client/service.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/service.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,15 +1,18 @@ import logging import signal -from twisted.application.service import Application, Service from twisted.application.app import startApplication +from twisted.application.service import Application +from twisted.application.service import Service -from landscape.lib.logging import rotate_logs +from landscape.client.deployment import get_versioned_persist +from landscape.client.deployment import init_logging from landscape.client.reactor import LandscapeReactor -from landscape.client.deployment import get_versioned_persist, init_logging +from landscape.lib.logging import LoggingAttributeError +from landscape.lib.logging import rotate_logs -class LandscapeService(Service, object): +class LandscapeService(Service): """Utility superclass for defining Landscape services. This sets up the reactor and L{Persist} object. @@ -22,6 +25,7 @@ @ivar factory: A L{LandscapeComponentProtocolFactory}, it must be provided by instances of sub-classes. """ + reactor_factory = LandscapeReactor persist_filename = None @@ -32,21 +36,27 @@ self.persist = get_versioned_persist(self) if not (self.config is not None and self.config.ignore_sigusr1): from twisted.internet import reactor + signal.signal( signal.SIGUSR1, - lambda signal, frame: reactor.callFromThread(rotate_logs)) + lambda signal, frame: reactor.callFromThread(rotate_logs), + ) - def startService(self): + def startService(self): # noqa: N802 Service.startService(self) - logging.info("%s started with config %s" % ( - self.service_name.capitalize(), self.config.get_config_filename())) + logging.info( + f"{self.service_name.capitalize()} started with " + f"config {self.config.get_config_filename()}", + ) - def stopService(self): + def stopService(self): # noqa: N802 # We don't need to call port.stopListening(), because the reactor # shutdown sequence will do that for us. 
Service.stopService(self) - logging.info("%s stopped with config %s" % ( - self.service_name.capitalize(), self.config.get_config_filename())) + logging.info( + f"{self.service_name.capitalize()} stopped with " + f"config {self.config.get_config_filename()}", + ) def run_landscape_service(configuration_class, service_class, args): @@ -70,17 +80,20 @@ configuration = configuration_class() configuration.load(args) - init_logging(configuration, service_class.service_name) - application = Application("landscape-%s" % (service_class.service_name,)) + try: + init_logging(configuration, service_class.service_name) + except LoggingAttributeError: + return + application = Application(f"landscape-{service_class.service_name}") service = service_class(configuration) service.setServiceParent(application) if configuration.clones > 0: - # Increase the timeout of AMP's MethodCalls # XXX: we should find a better way to expose this knot, and # not set it globally on the class from landscape.lib.amp import MethodCallSender + MethodCallSender.timeout = 300 # Create clones here because LandscapeReactor.__init__ would otherwise @@ -88,10 +101,10 @@ clones = [] for i in range(configuration.clones): clone_config = configuration.clone() - clone_config.computer_title += " Clone %d" % i + clone_config.computer_title += f" Clone {i:d}" clone_config.master_data_path = configuration.data_path - clone_config.data_path += "-clone-%d" % i - clone_config.log_dir += "-clone-%d" % i + clone_config.data_path += f"-clone-{i:d}" + clone_config.log_dir += f"-clone-{i:d}" clone_config.is_clone = True clones.append(service_class(clone_config)) diff -Nru landscape-client-23.02/landscape/client/snap/http.py landscape-client-23.08/landscape/client/snap/http.py --- landscape-client-23.02/landscape/client/snap/http.py 1970-01-01 00:00:00.000000000 +0000 +++ landscape-client-23.08/landscape/client/snap/http.py 2023-08-17 21:19:32.000000000 +0000 @@ -0,0 +1,302 @@ +""" +Functions for interacting with the snapd REST API. +See https://snapcraft.io/docs/snapd-api for documentation of the API. +""" +import json +from io import BytesIO + +import pycurl + +SNAPD_SOCKET = "/run/snapd.socket" +BASE_URL = "http://localhost/v2" + +# For the below, refer to https://snapcraft.io/docs/snapd-api#heading--changes +COMPLETE_STATUSES = {"Done", "Error", "Hold", "Abort"} +INCOMPLETE_STATUSES = {"Do", "Doing", "Undo", "Undoing"} +SUCCESS_STATUSES = {"Done"} +ERROR_STATUSES = {"Error", "Hold", "Unknown"} + + +class SnapdHttpException(Exception): + @property + def json(self): + """Attempts to parse the body of this exception as json.""" + body = self.args[0] + + return json.loads(body) + + +class SnapHttp: + def __init__(self, snap_url=BASE_URL, snap_socket=SNAPD_SOCKET): + self._snap_url = snap_url + self._snap_socket = snap_socket + + def check_change(self, cid): + """Check the status of snapd change with id `cid`.""" + return self._get("/changes/" + cid) + + def check_changes(self): + """Check the status of all snapd changes.""" + return self._get("/changes?select=all") + + def enable_snap(self, name): + """Enables a previously disabled snap by `name`.""" + return self._post("/snaps/" + name, {"action": "enable"}) + + def enable_snaps(self, snaps): + """See `self.enable_snap`.""" + return self._post( + "/snaps", + { + "action": "enable", + "snaps": snaps, + }, + ) + + def disable_snap(self, name): + """ + Disables a snap by `name`, making its binaries and services + unavailable. 
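
A sketch of how the status sets defined at the top of snap/http.py might drive polling of an asynchronous snapd change via check_change; the wait loop itself is illustrative and not part of this diff:

    import time

    def wait_for_change(http, cid, interval=1.0):
        # Poll until the change leaves the in-flight statuses, then
        # report whether it landed in a success status.
        while True:
            status = http.check_change(cid)["result"]["status"]
            if status in COMPLETE_STATUSES:
                return status in SUCCESS_STATUSES
            time.sleep(interval)
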
+ """ + return self._post("/snaps/" + name, {"action": "disable"}) + + def disable_snaps(self, snaps): + """See `self.disable_snap`.""" + return self._post( + "/snaps", + { + "action": "enable", + "snaps": snaps, + }, + ) + + def get_snaps(self): + """GETs a list of installed snaps.""" + return self._get("/snaps") + + def hold_snap(self, name, hold_level="general", time="forever"): + """ + Holds a snap by `name` at `hold_level` until `time`. + + `hold_level` is "general" or "auto-refresh". + `time` is "forever" or an RFC3339 timestamp. + """ + body = _clean_dict( + { + "action": "hold", + "hold-level": hold_level, + "time": time, + }, + ) + + return self._post("/snaps/" + name, body) + + def hold_snaps(self, snaps, hold_level="general", time="forever"): + """ + Same as `self.hold_snap`, except for a batch of snaps. + """ + body = _clean_dict( + { + "action": "hold", + "snaps": snaps, + "hold-level": hold_level, + "time": time, + }, + ) + + return self._post("/snaps", body) + + def install_snap(self, name, revision=None, channel=None, classic=False): + """ + Installs a snap by `name` at `revision`, tracking `channel`. If + `classic`, then snap is installed in classic containment mode. + + If `revision` is not provided, latest will be used. + If `channel` is not provided, stable will be used. + """ + body = _clean_dict( + { + "action": "install", + "revision": revision, + "channel": channel, + "classic": classic, + }, + ) + + return self._post("/snaps/" + name, body) + + def install_snaps(self, snaps): + return self._post("/snaps", {"action": "install", "snaps": snaps}) + + def refresh_snap(self, name, revision=None, channel=None, classic=None): + """ + Refreshes a snap, switching to the given `revision` and `channel` if + provided. + + If `classic` is provided, snap will be changed to the classic + confinement if True, or out of classic confinement if False. + """ + body = _clean_dict( + { + "action": "refresh", + "revision": revision, + "channel": channel, + "classic": classic, + }, + ) + + return self._post("/snaps/" + name, body) + + def refresh_snaps(self, snaps=[]): + """ + Refreshes `snaps` to the latest revision. If `snaps` is empty, + all snaps are refreshed. + """ + body = {"action": "refresh"} + + if snaps: + body["snaps"] = snaps + + return self._post("/snaps", body) + + def revert_snap(self, name, revision=None, classic=None): + """ + Reverts a snap, switching to the given `revision` is provided. + Otherwise switches to the revision used prior to the last + refresh. + + If `classic` is provided, snap will be changed to classic + confinement if True, or out of classic confinement if False. + """ + body = _clean_dict( + { + "action": "revert", + "revision": revision, + "classic": classic, + }, + ) + + return self._post("/snaps/" + name, body) + + def revert_snaps(self, snaps): + """ + Reverts `snaps` to the revision used prior to the last refresh. 
+ """ + return self._post( + "/snaps", + { + "action": "refresh", + "snaps": snaps, + }, + ) + + def remove_snap(self, name): + return self._post("/snaps/" + name, {"action": "remove"}) + + def remove_snaps(self, snaps): + return self._post( + "/snaps", + { + "action": "remove", + "snaps": snaps, + }, + ) + + def switch_snap(self, name, channel="stable"): + """Switches the channel that a snap is tracking.""" + return self._post( + "/snaps/" + name, + _clean_dict( + { + "action": "switch", + "channel": channel, + }, + ), + ) + + def switch_snaps(self, snaps, channel="stable"): + return self._post( + "/snaps", + _clean_dict( + { + "action": "switch", + "snaps": snaps, + "channel": channel, + }, + ), + ) + + def unhold_snap(self, name): + """ + Remove a hold on a snap, allowing it to refresh on it's usual + schedule. + """ + return self._post("/snaps/" + name, {"action": "unhold"}) + + def unhold_snaps(self, snaps): + """See `self.unhold_snap`.""" + return self._post( + "/snaps", + { + "action": "unhold", + "snaps": snaps, + }, + ) + + def _get(self, path): + """Perform a GET request of `path` to the snap REST API.""" + curl, buff = self._setup_curl(path) + + self._perform(curl, buff) + + return json.loads(buff.getvalue()) + + def _perform(self, curl, buff, raise_on_error=True): + """ + Performs a pycurl request, optionally raising on a pycurl or HTTP + error. + """ + try: + curl.perform() + except pycurl.error as e: + raise SnapdHttpException(e) + + response_code = curl.getinfo(curl.RESPONSE_CODE) + if response_code >= 400: + raise SnapdHttpException(buff.getvalue()) + + def _post(self, path, body): + """ + Perform a POST request of `path` to the snap REST API, with the + JSON-ified `body` + """ + curl, buff = self._setup_curl(path) + json_body = json.dumps(body) + + curl.setopt(curl.POSTFIELDS, json_body) + curl.setopt(curl.HTTPHEADER, ["Content-Type: application/json"]) + self._perform(curl, buff) + + return json.loads(buff.getvalue()) + + def _setup_curl(self, path): + """ + Prepares pycurl to communicate with the snap REST API at the given + `path`. + """ + curl = pycurl.Curl() + buff = BytesIO() + + curl.setopt(curl.UNIX_SOCKET_PATH, self._snap_socket) + curl.setopt(curl.URL, self._snap_url + path) + curl.setopt(curl.WRITEDATA, buff) + + return curl, buff + + +def _clean_dict(d): + """ + Only includes keys from `d` in the resulting dict if they are not + None. + """ + return {k: v for k, v in d.items() if v is not None} diff -Nru landscape-client-23.02/landscape/client/snap/tests/test_http.py landscape-client-23.08/landscape/client/snap/tests/test_http.py --- landscape-client-23.02/landscape/client/snap/tests/test_http.py 1970-01-01 00:00:00.000000000 +0000 +++ landscape-client-23.08/landscape/client/snap/tests/test_http.py 2023-08-17 21:19:32.000000000 +0000 @@ -0,0 +1,51 @@ +from unittest import TestCase +from unittest.mock import Mock +from unittest.mock import patch + +from landscape.client.snap.http import SnapdHttpException +from landscape.client.snap.http import SnapHttp + + +class SnapHttpTestCase(TestCase): + """ + Most of the snapd REST API methods require root. I don't think it's + a good idea to be running unit tests as root, and mocking the HTTP + requests isn't a valuable way to test things, so this testsuite is + rather limited. Thankfully most of the methods of SnapHttp are very + simple - we just test that we are sending the right POST bodies for + the more complex cases. 
+ """ + + def test_get_snaps(self): + """get_snaps() returns a dict with a list of installed snaps.""" + http = SnapHttp() + result = http.get_snaps()["result"] + + self.assertTrue(isinstance(result, list)) + self.assertGreater(len(result), 0) + + first = result[0] + for key in ("id", "name", "publisher"): + self.assertIn(key, first) + + def test_get_snaps_error_code(self): + """ + get_snaps raises a SnapdHttpException if the response code from + the snapd HTTP service is >= 400 + """ + http = SnapHttp() + + with patch("pycurl.Curl") as curl_mock: + getinfo_mock = Mock(return_value=400) + curl_mock.return_value = Mock(getinfo=getinfo_mock) + + self.assertRaises(SnapdHttpException, http.get_snaps) + + def test_get_snaps_couldnt_connect(self): + """ + get_snaps raises a SnapdHttpException if we cannot reach the + snapd HTTP service. + """ + http = SnapHttp(snap_socket="/run/garbage.socket") + + self.assertRaises(SnapdHttpException, http.get_snaps) diff -Nru landscape-client-23.02/landscape/client/sysvconfig.py landscape-client-23.08/landscape/client/sysvconfig.py --- landscape-client-23.02/landscape/client/sysvconfig.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/sysvconfig.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -"""Programmatically manage the Landscape client SysV-style init script.""" -from subprocess import Popen - - -class ProcessError(Exception): - """ Error running a process with os.system. """ - - -class SystemdConfig(object): - """Configure and drive the Landscape client service.""" - - def set_start_on_boot(self, flag): - """Make the service decide to start the client when it's run.""" - action = "enable" if flag else "disable" - cmd = ["systemctl", action, "landscape-client.service"] - Popen(cmd).wait() - - def restart_landscape(self): - """Restart the Landscape client service.""" - cmd = ["systemctl", "restart", "landscape-client.service"] - if Popen(cmd).wait(): - raise ProcessError("Could not restart client") - - def stop_landscape(self): - """Stop the Landscape client service.""" - if Popen(["systemctl", "stop", "landscape-client.service"]).wait(): - raise ProcessError("Could not stop client") - - def is_configured_to_run(self): - """ - Return a boolean representing whether the service will start on boot. - """ - cmd = ["systemctl", "is-enabled", "landscape-client.service"] - return Popen(cmd).wait() == 0 - - -# Deprecated alias for charms and scripts still using the old name. -SysVConfig = SystemdConfig diff -Nru landscape-client-23.02/landscape/client/tests/clock.py landscape-client-23.08/landscape/client/tests/clock.py --- landscape-client-23.02/landscape/client/tests/clock.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/clock.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,4 @@ # Copyright (c) 2001-2007 Twisted Matrix Laboratories. - # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including @@ -7,10 +6,8 @@ # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: - # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. 
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND @@ -18,7 +15,6 @@ # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - """ Copies of certain classes from Twisted 2.5, so that we can use the functionality they provide with Twisted 2.2 and up. These should @@ -30,12 +26,12 @@ * L{twisted.internet.base.DelayedCall}, which didn't grow its C{seconds} argument until after Twisted 2.2. """ +import traceback + from twisted.internet import error -from twisted.python.runtime import seconds as runtimeSeconds from twisted.python import reflect from twisted.python.compat import iteritems - -import traceback +from twisted.python.runtime import seconds as runtimeseconds class Clock: @@ -44,7 +40,8 @@ L{IReactorTime.callLater}. This is commonly useful for writing deterministic unit tests for code which schedules events using this API. """ - rightNow = 0.0 + + rightNow = 0.0 # noqa: N815 def __init__(self): self.calls = [] @@ -60,16 +57,21 @@ """ return self.rightNow - def callLater(self, when, what, *a, **kw): + def callLater(self, when, what, *a, **kw): # noqa: N802 """ See L{twisted.internet.interfaces.IReactorTime.callLater}. """ self.calls.append( - DelayedCall(self.seconds() + when, - what, a, kw, - self.calls.remove, - (lambda c: None), - self.seconds)) + DelayedCall( + self.seconds() + when, + what, + a, + kw, + self.calls.remove, + (lambda c: None), + self.seconds, + ), + ) self.calls.sort(key=lambda a: a.getTime()) return self.calls[-1] @@ -105,8 +107,16 @@ debug = False _str = None - def __init__(self, time, func, args, kw, cancel, reset, - seconds=runtimeSeconds): + def __init__( + self, + time, + func, + args, + kw, + cancel, + reset, + seconds=runtimeseconds, + ): """ @param time: Seconds from the epoch at which to call C{func}. @param func: The callable to call. @@ -132,7 +142,7 @@ if self.debug: self.creator = traceback.format_stack()[:-2] - def getTime(self): + def getTime(self): # noqa: N802 """Return the time at which this call will fire @rtype: C{float} @@ -160,11 +170,11 @@ self._str = str(self) del self.func, self.args, self.kw - def reset(self, secondsFromNow): + def reset(self, secondsfromnow): """Reschedule this call for a different time - @type secondsFromNow: C{float} - @param secondsFromNow: The number of seconds from the time of the + @type secondsfromnow: C{float} + @param secondsfromnow: The number of seconds from the time of the C{reset} call at which this call will be scheduled. @raise AlreadyCancelled: Raised if this call has been cancelled. @@ -175,19 +185,19 @@ elif self.called: raise error.AlreadyCalled else: - newTime = self.seconds() + secondsFromNow - if newTime < self.time: + newtime = self.seconds() + secondsfromnow + if newtime < self.time: self.delayed_time = 0 - self.time = newTime + self.time = newtime self.resetter(self) else: - self.delayed_time = newTime - self.time + self.delayed_time = newtime - self.time - def delay(self, secondsLater): + def delay(self, secondslater): """Reschedule this call for a later time - @type secondsLater: C{float} - @param secondsLater: The number of seconds after the originally + @type secondslater: C{float} + @param secondslater: The number of seconds after the originally scheduled time for which to reschedule this call. 
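
A small sketch separating the two rescheduling methods of this module's DelayedCall, driven by the deterministic Clock above; the times are illustrative:

    clock = Clock()
    call = clock.callLater(10, lambda: None)  # fires at t=10

    call.delay(5)  # offset from the original time: now fires at t=15
    call.reset(3)  # relative to clock.seconds(): now fires at t=3
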
@raise AlreadyCancelled: Raised if this call has been cancelled. @@ -198,7 +208,7 @@ elif self.called: raise error.AlreadyCalled else: - self.delayed_time += secondsLater + self.delayed_time += secondslater if self.delayed_time < 0: self.activate_delay() self.resetter(self) @@ -222,34 +232,44 @@ def __str__(self): if self._str is not None: return self._str - if hasattr(self, 'func'): - if hasattr(self.func, 'func_name'): + if hasattr(self, "func"): + if hasattr(self.func, "func_name"): func = self.func.func_name - if hasattr(self.func, 'im_class'): - func = self.func.im_class.__name__ + '.' + func + if hasattr(self.func, "im_class"): + func = self.func.im_class.__name__ + "." + func else: func = reflect.safe_repr(self.func) else: func = None now = self.seconds() - L = ["<DelayedCall %s [%ss] called=%s cancelled=%s" % ( - id(self), self.time - now, self.called, self.cancelled)] - if func is not None: - L.extend((" ", func, "(")) - if self.args: - L.append(", ".join([reflect.safe_repr(e) for e in self.args])) - if self.kw: - L.append(", ") - if self.kw: - L.append(", ".join(["%s=%s" % (k, reflect.safe_repr(v)) for (k, v) in iteritems(self.kw)])) - L.append(")") - if self.debug: - L.append("\n\ntraceback at creation: \n\n%s" % (" ".join(self.creator))) - L.append(">") + li = [ + "<DelayedCall {} [{}s] called={} cancelled={}".format( + id(self), + self.time - now, + self.called, + self.cancelled, + ), + ] + if func is not None: + li.extend((" ", func, "(")) + if self.args: + li.append(", ".join([reflect.safe_repr(e) for e in self.args])) + if self.kw: + li.append(", ") + if self.kw: + li.append( + ", ".join( + "{}={}".format(k, reflect.safe_repr(v)) + for (k, v) in iteritems(self.kw) + ), + ) + li.append(")") + if self.debug: + li.append( "\n\ntraceback at creation: \n\n{}".format( " ".join(self.creator), ), ) + li.append(">") - return "".join(L) + return "".join(li) diff -Nru landscape-client-23.02/landscape/client/tests/helpers.py landscape-client-23.08/landscape/client/tests/helpers.py --- landscape-client-23.02/landscape/client/tests/helpers.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/helpers.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,45 +1,47 @@ import pprint import unittest -from landscape.client.tests.subunit import run_isolated -from landscape.client.watchdog import bootstrap_list - -from landscape.lib.persist import Persist -from landscape.lib.testing import FakeReactor - +from landscape.client.broker.amp import FakeRemoteBroker +from landscape.client.broker.amp import RemoteBrokerConnector from landscape.client.broker.config import BrokerConfiguration -from landscape.client.broker.transport import FakeTransport -from landscape.client.monitor.config import MonitorConfiguration -from landscape.client.monitor.monitor import Monitor -from landscape.client.manager.manager import Manager - from landscape.client.broker.service import BrokerService -from landscape.client.broker.amp import FakeRemoteBroker, RemoteBrokerConnector +from landscape.client.broker.transport import FakeTransport from landscape.client.deployment import BaseConfiguration from landscape.client.manager.config import ManagerConfiguration - +from landscape.client.manager.manager import Manager +from landscape.client.monitor.config import MonitorConfiguration +from landscape.client.monitor.monitor import Monitor +from landscape.client.tests.subunit import run_isolated +from landscape.client.watchdog import bootstrap_list from landscape.lib import testing +from landscape.lib.persist import Persist +from landscape.lib.testing import FakeReactor DEFAULT_ACCEPTED_TYPES = [ - "accepted-types", "registration", "resynchronize", "set-id", - "set-intervals", "unknown-id"] + "accepted-types", + "registration", + "resynchronize", + "set-id", + "set-intervals", + "unknown-id", +] class MessageTestCase(unittest.TestCase): - - def assertMessage(self, obtained, expected): + def assertMessage(self, obtained, expected): # noqa: N802 obtained = obtained.copy() for key in ["api", "timestamp"]: if key not in expected and key in obtained: obtained.pop(key) if obtained != expected: - raise self.failureException("Messages don't match.\n" - "Expected:\n%s\nObtained:\n%s\n" - % (pprint.pformat(expected), - pprint.pformat(obtained))) + raise self.failureException( + "Messages don't match.\n" + f"Expected:\n{pprint.pformat(expected)}\n" + f"Obtained:\n{pprint.pformat(obtained)}\n", + ) - def assertMessages(self, obtained, expected): + def 
assertMessages(self, obtained, expected): # noqa: N802 self.assertEqual(type(obtained), list) self.assertEqual(type(expected), list) for obtained_message, expected_message in zip(obtained, expected): @@ -49,25 +51,32 @@ diff = abs(expected_len - obtained_len) if obtained_len < expected_len: extra = pprint.pformat(expected[-diff:]) - raise self.failureException("Expected the following %d additional " - "messages:\n%s" % (diff, extra)) + raise self.failureException( + f"Expected the following {diff:d} additional " + f"messages:\n{extra}", + ) elif expected_len < obtained_len: extra = pprint.pformat(obtained[-diff:]) - raise self.failureException("Got %d more messages than expected:\n" - "%s" % (diff, extra)) - + raise self.failureException( + f"Got {diff:d} more messages than expected:\n{extra}", + ) -class LandscapeTest(MessageTestCase, testing.TwistedTestCase, - testing.HelperTestCase, testing.ConfigTestCase, - testing.CompatTestCase): +class LandscapeTest( + MessageTestCase, + testing.TwistedTestCase, + testing.HelperTestCase, + testing.ConfigTestCase, + testing.CompatTestCase, +): def setUp(self): testing.TwistedTestCase.setUp(self) result = testing.HelperTestCase.setUp(self) self._orig_filenames = BaseConfiguration.default_config_filenames BaseConfiguration.default_config_filenames = ( - testing.BaseConfiguration.default_config_filenames) + testing.BaseConfiguration.default_config_filenames + ) return result @@ -77,7 +86,7 @@ testing.TwistedTestCase.tearDown(self) testing.HelperTestCase.tearDown(self) - def makePersistFile(self, *args, **kwargs): + def makePersistFile(self, *args, **kwargs): # noqa: N802 """Return a temporary filename to be used by a L{Persist} object. The possible .old persist file is cleaned up after the test. @@ -97,12 +106,13 @@ return run_method(oself, *args, **kwargs) finally: self.doCleanups() + LandscapeTest.run = run_wrapper LandscapeTest._cleanup_patch = True run_isolated(LandscapeTest, self, result) -class FakeBrokerServiceHelper(object): +class FakeBrokerServiceHelper: """ The following attributes will be set in your test case: - broker_service: A C{BrokerService}. 
@@ -119,11 +129,14 @@ "computer_title = Some Computer\n" "account_name = some_account\n" "ping_url = http://localhost:91910\n" - "data_path = %s\n" - "log_dir = %s\n" % (test_case.data_path, log_dir)) - - bootstrap_list.bootstrap(data_path=test_case.data_path, - log_dir=log_dir) + f"data_path = {test_case.data_path}\n" + f"log_dir = {log_dir}\n", + ) + + bootstrap_list.bootstrap( + data_path=test_case.data_path, + log_dir=log_dir, + ) config = BrokerConfiguration() config.load(["-c", test_case.config_filename]) @@ -137,7 +150,8 @@ test_case.remote = FakeRemoteBroker( test_case.broker_service.exchanger, test_case.broker_service.message_store, - test_case.broker_service.broker) + test_case.broker_service.broker, + ) class BrokerServiceHelper(FakeBrokerServiceHelper): @@ -150,11 +164,13 @@ """ def set_up(self, test_case): - super(BrokerServiceHelper, self).set_up(test_case) + super().set_up(test_case) test_case.broker_service.startService() # Use different reactor to simulate separate processes self._connector = RemoteBrokerConnector( - FakeReactor(), test_case.broker_service.config) + FakeReactor(), + test_case.broker_service.config, + ) deferred = self._connector.connect() test_case.remote = test_case.successResultOf(deferred) @@ -170,7 +186,7 @@ """ def set_up(self, test_case): - super(MonitorHelper, self).set_up(test_case) + super().set_up(test_case) persist = Persist() persist_filename = test_case.makePersistFile() test_case.config = MonitorConfiguration() @@ -178,8 +194,11 @@ test_case.config.stagger_launch = 0 # let's keep tests deterministic test_case.reactor = FakeReactor() test_case.monitor = Monitor( - test_case.reactor, test_case.config, - persist, persist_filename) + test_case.reactor, + test_case.config, + persist, + persist_filename, + ) test_case.monitor.broker = test_case.remote test_case.mstore = test_case.broker_service.message_store @@ -191,7 +210,7 @@ """ def set_up(self, test_case): - super(ManagerHelper, self).set_up(test_case) + super().set_up(test_case) test_case.config = ManagerConfiguration() test_case.config.load(["-c", test_case.config_filename]) test_case.reactor = FakeReactor() @@ -199,10 +218,15 @@ test_case.manager.broker = test_case.remote -class MockCoverageMonitor(object): - - def __init__(self, count=None, expected_count=None, percent=None, - since_reset=None, warn=None): +class MockCoverageMonitor: + def __init__( + self, + count=None, + expected_count=None, + percent=None, + since_reset=None, + warn=None, + ): self.count = count or 0 self.expected_count = expected_count or 0 self.percent = percent or 0.0 @@ -219,8 +243,7 @@ pass -class MockFrequencyMonitor(object): - +class MockFrequencyMonitor: def __init__(self, count=None, expected_count=None, warn=None): self.count = count or 0 self.expected_count = expected_count or 0 @@ -233,7 +256,7 @@ pass -class FakePersist(object): +class FakePersist: """ Incompletely fake a C{landscape.lib.Persist} to simplify higher level tests that result in an attempt to clear down persisted data. 
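
A sketch of how these helpers compose in the test cases throughout this diff; the test class and assertions are illustrative:

    from landscape.client.tests.helpers import LandscapeTest, MonitorHelper

    class ExampleMonitorTest(LandscapeTest):

        helpers = [MonitorHelper]

        def test_nothing_pending_initially(self):
            # MonitorHelper.set_up() provided self.monitor, self.reactor,
            # self.config and self.mstore (the broker message store).
            self.mstore.set_accepted_types(["operation-result"])
            self.assertEqual([], self.mstore.get_pending_messages())
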
diff -Nru landscape-client-23.02/landscape/client/tests/subunit.py landscape-client-23.08/landscape/client/tests/subunit.py --- landscape-client-23.02/landscape/client/tests/subunit.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/subunit.py 2023-08-17 21:19:32.000000000 +0000 @@ -26,6 +26,7 @@ def test_suite(): import subunit.tests + return subunit.tests.test_suite() @@ -43,7 +44,7 @@ return os.path.join(os.path.dirname(os.path.abspath(base_path)), path) -class TestProtocolServer(object): +class TestProtocolServer: """A class for receiving results from a TestProtocol client.""" OUTSIDE_TEST = 0 @@ -55,150 +56,173 @@ """Create a TestProtocol server instance. client should be an object that provides - - startTest - - addSuccess - - addFailure - - addError - - stopTest + - starttest + - addsuccess + - addfailure + - adderror + - stoptest methods, i.e. a TestResult. """ self.state = TestProtocolServer.OUTSIDE_TEST self.client = client self._stream = stream - def _addError(self, offset, line): - if (self.state == TestProtocolServer.TEST_STARTED and - self.current_test_description == line[offset:-1] - ): + def _adderror(self, offset, line): + if ( + self.state == TestProtocolServer.TEST_STARTED + and self.current_test_description == line[offset:-1] + ): self.state = TestProtocolServer.OUTSIDE_TEST self.current_test_description = None - self.client.addError(self._current_test, RemoteError("")) + self.client.adderror(self._current_test, RemoteError("")) self.client.stopTest(self._current_test) self._current_test = None - elif (self.state == TestProtocolServer.TEST_STARTED and - self.current_test_description + " [" == line[offset:-1]): + elif ( + self.state == TestProtocolServer.TEST_STARTED + and self.current_test_description + " [" == line[offset:-1] + ): self.state = TestProtocolServer.READING_ERROR self._message = "" else: - self.stdOutLineReceived(line) + self.stdoutlinereceived(line) - def _addFailure(self, offset, line): - if (self.state == TestProtocolServer.TEST_STARTED and - self.current_test_description == line[offset:-1] - ): + def _addfailure(self, offset, line): + if ( + self.state == TestProtocolServer.TEST_STARTED + and self.current_test_description == line[offset:-1] + ): self.state = TestProtocolServer.OUTSIDE_TEST self.current_test_description = None - self.client.addFailure(self._current_test, RemoteError()) + self.client.addfailure(self._current_test, RemoteError()) self.client.stopTest(self._current_test) - elif (self.state == TestProtocolServer.TEST_STARTED and - self.current_test_description + " [" == line[offset:-1]): + elif ( + self.state == TestProtocolServer.TEST_STARTED + and self.current_test_description + " [" == line[offset:-1] + ): self.state = TestProtocolServer.READING_FAILURE self._message = "" else: - self.stdOutLineReceived(line) + self.stdoutlinereceived(line) - def _addSuccess(self, offset, line): - if (self.state == TestProtocolServer.TEST_STARTED and - self.current_test_description == line[offset:-1] - ): + def _addsuccess(self, offset, line): + if ( + self.state == TestProtocolServer.TEST_STARTED + and self.current_test_description == line[offset:-1] + ): - self.client.addSuccess(self._current_test) + self.client.addsuccess(self._current_test) self.client.stopTest(self._current_test) self.current_test_description = None self._current_test = None self.state = TestProtocolServer.OUTSIDE_TEST else: - self.stdOutLineReceived(line) + self.stdoutlinereceived(line) - def _appendMessage(self, line): + def 
_appendmessage(self, line): if line[0:2] == " ]": # quoted ] start self._message += line[1:] else: self._message += line - def endQuote(self, line): + def endquote(self, line): if self.state == TestProtocolServer.READING_FAILURE: self.state = TestProtocolServer.OUTSIDE_TEST self.current_test_description = None - self.client.addFailure(self._current_test, - RemoteError(self._message)) + self.client.addfailure( + self._current_test, + RemoteError(self._message), + ) self.client.stopTest(self._current_test) elif self.state == TestProtocolServer.READING_ERROR: self.state = TestProtocolServer.OUTSIDE_TEST self.current_test_description = None - self.client.addError(self._current_test, - RemoteError(self._message)) + self.client.adderror( + self._current_test, + RemoteError(self._message), + ) self.client.stopTest(self._current_test) else: - self.stdOutLineReceived(line) + self.stdoutlinereceived(line) - def lineReceived(self, line): + def linereceived(self, line): """Call the appropriate local method for the received line.""" if line == "]\n": - self.endQuote(line) - elif (self.state == TestProtocolServer.READING_FAILURE or - self.state == TestProtocolServer.READING_ERROR): - self._appendMessage(line) + self.endquote(line) + elif ( + self.state == TestProtocolServer.READING_FAILURE + or self.state == TestProtocolServer.READING_ERROR + ): + self._appendmessage(line) else: parts = line.split(None, 1) if len(parts) == 2: cmd, rest = parts offset = len(cmd) + 1 - cmd = cmd.strip(':') - if cmd in ('test', 'testing'): - self._startTest(offset, line) - elif cmd == 'error': - self._addError(offset, line) - elif cmd == 'failure': - self._addFailure(offset, line) - elif cmd in ('success', 'successful'): - self._addSuccess(offset, line) + cmd = cmd.strip(":") + if cmd in ("test", "testing"): + self._starttest(offset, line) + elif cmd == "error": + self._adderror(offset, line) + elif cmd == "failure": + self._addfailure(offset, line) + elif cmd in ("success", "successful"): + self._addsuccess(offset, line) else: - self.stdOutLineReceived(line) + self.stdoutlinereceived(line) else: - self.stdOutLineReceived(line) + self.stdoutlinereceived(line) - def lostConnection(self): + def lostconnection(self): """The input connection has finished.""" if self.state == TestProtocolServer.TEST_STARTED: - self.client.addError(self._current_test, - RemoteError("lost connection during test '%s'" - % self.current_test_description)) + self.client.adderror( + self._current_test, + RemoteError( + "lost connection during test " + f"'{self.current_test_description}'", + ), + ) self.client.stopTest(self._current_test) elif self.state == TestProtocolServer.READING_ERROR: - self.client.addError(self._current_test, - RemoteError("lost connection during " - "error report of test " - "'%s'" % - self.current_test_description)) + self.client.adderror( + self._current_test, + RemoteError( + "lost connection during " + "error report of test " + f"'{self.current_test_description}'", + ), + ) self.client.stopTest(self._current_test) elif self.state == TestProtocolServer.READING_FAILURE: - self.client.addError(self._current_test, - RemoteError("lost connection during " - "failure report of test " - "'%s'" % - self.current_test_description)) + self.client.adderror( + self._current_test, + RemoteError( + "lost connection during " + "failure report of test " + f"'{self.current_test_description}'", + ), + ) self.client.stopTest(self._current_test) - def readFrom(self, pipe): + def readfrom(self, pipe): for line in pipe.readlines(): - 
self.lineReceived(line) - self.lostConnection() + self.linereceived(line) + self.lostconnection() - def _startTest(self, offset, line): - """Internal call to change state machine. Override startTest().""" + def _starttest(self, offset, line): + """Internal call to change state machine. Override starttest().""" if self.state == TestProtocolServer.OUTSIDE_TEST: self.state = TestProtocolServer.TEST_STARTED self._current_test = RemotedTestCase(line[offset:-1]) self.current_test_description = line[offset:-1] self.client.startTest(self._current_test) else: - self.stdOutLineReceived(line) + self.stdoutlinereceived(line) - def stdOutLineReceived(self, line): + def stdoutlinereceived(self, line): self._stream.write(line) @@ -219,30 +243,30 @@ unittest.TestResult.__init__(self) self._stream = stream - def addError(self, test, error): + def adderror(self, test, error): """Report an error in test test.""" - self._stream.write("error: %s [\n" % test.shortDescription()) + self._stream.write(f"error: {test.shortDescription()} [\n") for line in self._exc_info_to_string(error, test).splitlines(): - self._stream.write("%s\n" % line) + self._stream.write(f"{line}\n") self._stream.write("]\n") - def addFailure(self, test, error): + def addfailure(self, test, error): """Report a failure in test test.""" - self._stream.write("failure: %s [\n" % test.shortDescription()) + self._stream.write(f"failure: {test.shortDescription()} [\n") for line in self._exc_info_to_string(error, test).splitlines(): - self._stream.write("%s\n" % line) + self._stream.write(f"{line}\n") self._stream.write("]\n") - def addSuccess(self, test): + def addsuccess(self, test): """Report a success in a test.""" - self._stream.write("successful: %s\n" % test.shortDescription()) + self._stream.write(f"successful: {test.shortDescription()}\n") - def startTest(self, test): + def starttest(self, test): """Mark a test as starting its test run.""" - self._stream.write("test: %s\n" % test.shortDescription()) + self._stream.write(f"test: {test.shortDescription()}\n") -def RemoteError(description=""): +def RemoteError(description=""): # noqa: N802 if description == "": description = "\n" return (RemoteException, RemoteException(description), None) @@ -263,7 +287,8 @@ def error(self, label): raise NotImplementedError( - "%s on RemotedTestCases is not permitted." 
% label) + f"{label} on RemotedTestCases is not permitted.", ) def setUp(self): self.error("setUp") @@ -271,45 +296,46 @@ def tearDown(self): self.error("tearDown") - def shortDescription(self): + def shortDescription(self): # noqa: N802 return self.__description def id(self): - return "%s.%s" % (self._strclass(), self.__description) + return f"{self._strclass()}.{self.__description}" def __str__(self): - return "%s (%s)" % (self.__description, self._strclass()) + return f"{self.__description} ({self._strclass()})" def __repr__(self): - return "<%s description='%s'>" % \ - (self._strclass(), self.__description) + return f"<{self._strclass()} description='{self.__description}'>" def run(self, result=None): if result is None: result = self.defaultTestResult() - result.startTest(self) - result.addError(self, RemoteError("Cannot run RemotedTestCases.\n")) + result.starttest(self) + result.adderror(self, RemoteError("Cannot run RemotedTestCases.\n")) result.stopTest(self) def _strclass(self): cls = self.__class__ - return "%s.%s" % (cls.__module__, cls.__name__) + return f"{cls.__module__}.{cls.__name__}" class ExecTestCase(unittest.TestCase): """A test case which runs external scripts for test fixtures.""" - def __init__(self, methodName='runTest'): + def __init__(self, methodName="runTest"): # noqa: N803 """Create an instance of the class that will use the named test - method when executed. Raises a ValueError if the instance does - not have a method with the specified name. + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. """ unittest.TestCase.__init__(self, methodName) - testMethod = getattr(self, methodName) - self.script = join_dir(sys.modules[self.__class__.__module__].__file__, - testMethod.__doc__) + testmethod = getattr(self, methodName) + self.script = join_dir( + sys.modules[self.__class__.__module__].__file__, + testmethod.__doc__, + ) - def countTestCases(self): + def countTestCases(self): # noqa: N802 return 1 def run(self, result=None): @@ -323,9 +349,11 @@ def _run(self, result): protocol = TestProtocolServer(result) - output = subprocess.Popen([self.script], - stdout=subprocess.PIPE).communicate()[0] - protocol.readFrom(StringIO(output)) + output = subprocess.Popen( + [self.script], + stdout=subprocess.PIPE, + ).communicate()[0] + protocol.readfrom(StringIO(output)) class IsolatedTestCase(unittest.TestCase): @@ -347,8 +375,7 @@ def run_isolated(klass, self, result): - """Run a test suite or case in a subprocess, using the run method on klass. - """ + """Run a test suite or case in a subprocess, using klass's run method.""" c2pread, c2pwrite = os.pipe() # fixme - error -> result # now fork @@ -364,7 +391,7 @@ # at this point, sys.stdin is redirected, now we want # to filter it to escape ]'s. - ### XXX: test and write that bit. + # XXX: test and write that bit. result = TestProtocolClient(sys.stdout) klass.run(self, result) @@ -378,7 +405,7 @@ os.close(c2pwrite) # hookup a protocol engine protocol = TestProtocolServer(result) - protocol.readFrom(os.fdopen(c2pread, 'rU')) + protocol.readfrom(os.fdopen(c2pread, "rU")) os.waitpid(pid, 0) # TODO return code evaluation. 
return result diff -Nru landscape-client-23.02/landscape/client/tests/test_accumulate.py landscape-client-23.08/landscape/client/tests/test_accumulate.py --- landscape-client-23.02/landscape/client/tests/test_accumulate.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/test_accumulate.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,6 +1,7 @@ -from landscape.lib.persist import Persist -from landscape.client.accumulate import Accumulator, accumulate +from landscape.client.accumulate import accumulate +from landscape.client.accumulate import Accumulator from landscape.client.tests.helpers import LandscapeTest +from landscape.lib.persist import Persist class AccumulateTest(LandscapeTest): diff -Nru landscape-client-23.02/landscape/client/tests/test_amp.py landscape-client-23.08/landscape/client/tests/test_amp.py --- landscape-client-23.02/landscape/client/tests/test_amp.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/test_amp.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,22 +1,24 @@ -import os import errno +import os import subprocess import textwrap +from unittest import mock -import mock - -from twisted.internet.error import ConnectError, CannotListenError +from twisted.internet.error import CannotListenError +from twisted.internet.error import ConnectError from twisted.internet.task import Clock -from landscape.client.tests.helpers import LandscapeTest +from landscape.client.amp import ComponentConnector +from landscape.client.amp import ComponentPublisher +from landscape.client.amp import remote from landscape.client.deployment import Configuration -from landscape.client.amp import ComponentPublisher, ComponentConnector, remote from landscape.client.reactor import LandscapeReactor +from landscape.client.tests.helpers import LandscapeTest from landscape.lib.amp import MethodCallError from landscape.lib.testing import FakeReactor -class TestComponent(object): +class TestComponent: name = "test" @@ -33,16 +35,14 @@ component = TestComponent -class FakeAMP(object): - +class FakeAMP: def __init__(self, locator): self._locator = locator class ComponentPublisherTest(LandscapeTest): - def setUp(self): - super(ComponentPublisherTest, self).setUp() + super().setUp() reactor = FakeReactor() config = Configuration() config.data_path = self.makeDir() @@ -59,7 +59,7 @@ def tearDown(self): self.connector.disconnect() self.publisher.stop() - super(ComponentPublisherTest, self).tearDown() + super().tearDown() def test_remote_methods(self): """Methods decorated with @remote are accessible remotely.""" @@ -74,9 +74,8 @@ class ComponentConnectorTest(LandscapeTest): - def setUp(self): - super(ComponentConnectorTest, self).setUp() + super().setUp() self.reactor = FakeReactor() # XXX this should be dropped once the FakeReactor doesn't use the # real reactor anymore under the hood. 
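The stale-lock tests in the next hunks lean on Twisted's symlink-based lock files: the lock is a symlink whose target is the holder's PID, and liveness is probed with kill(pid, 0), which the tests patch as twisted.python.lockfile.kill. A minimal sketch of the decision the tests pin down (ESRCH means a dead holder, EPERM or success means the PID exists but may have been recycled), assuming an illustrative helper name and a /proc cmdline check rather than the client's actual internals:

import errno
import os

def lock_is_stale(lock_path):
    # Twisted's FilesystemLock stores the holder's PID as the symlink target.
    try:
        pid = int(os.readlink(lock_path))
    except (OSError, ValueError):
        return True  # unreadable lock: safe to treat as stale
    try:
        os.kill(pid, 0)  # signal 0 probes existence without touching the process
    except OSError as e:
        if e.errno == errno.ESRCH:
            return True  # no such process: the holder died, lock is stale
        # EPERM: some process owns that PID, but it may be a recycled one
    try:
        with open(f"/proc/{pid}/cmdline", "rb") as f:
            # A live PID that isn't a landscape process means the PID was
            # recycled (e.g. by init), so the lock is still stale.
            return b"landscape" not in f.read()
    except OSError:
        return True

This matches the three cases exercised below: a dead PID (-1, ESRCH) starts cleanly, a recycled PID (1, init) starts cleanly, and a live landscape-named process keeps the lock valid.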
@@ -104,8 +103,10 @@ self.log_helper.ignore_errors("Error while connecting to test") def assert_log(ignored): - self.assertIn("Error while connecting to test", - self.logfile.getvalue()) + self.assertIn( + "Error while connecting to test", + self.logfile.getvalue(), + ) result = self.connector.connect(max_retries=0) self.assertFailure(result, ConnectError) @@ -171,10 +172,9 @@ @mock.patch("twisted.python.lockfile.kill") def test_stale_locks_with_dead_pid(self, mock_kill): """Publisher starts with stale lock.""" - mock_kill.side_effect = [ - OSError(errno.ESRCH, "No such process")] - sock_path = os.path.join(self.config.sockets_path, u"test.sock") - lock_path = u"{}.lock".format(sock_path) + mock_kill.side_effect = [OSError(errno.ESRCH, "No such process")] + sock_path = os.path.join(self.config.sockets_path, "test.sock") + lock_path = f"{sock_path}.lock" # fake a PID which does not exist os.symlink("-1", lock_path) @@ -197,9 +197,10 @@ def test_stale_locks_recycled_pid(self, mock_kill): """Publisher starts with stale lock pointing to recycled process.""" mock_kill.side_effect = [ - OSError(errno.EPERM, "Operation not permitted")] - sock_path = os.path.join(self.config.sockets_path, u"test.sock") - lock_path = u"{}.lock".format(sock_path) + OSError(errno.EPERM, "Operation not permitted"), + ] + sock_path = os.path.join(self.config.sockets_path, "test.sock") + lock_path = f"{sock_path}.lock" # fake a PID recycled by a known process which isn't landscape (init) os.symlink("1", lock_path) @@ -222,14 +223,19 @@ @mock.patch("twisted.python.lockfile.kill") def test_with_valid_lock(self, mock_kill): """Publisher raises lock error if a valid lock is held.""" - sock_path = os.path.join(self.config.sockets_path, u"test.sock") - lock_path = u"{}.lock".format(sock_path) + sock_path = os.path.join(self.config.sockets_path, "test.sock") + lock_path = f"{sock_path}.lock" # fake a landscape process - app = self.makeFile(textwrap.dedent("""\ + app = self.makeFile( + textwrap.dedent( + """\ #!/usr/bin/python3 import time time.sleep(10) - """), basename="landscape-manager") + """, + ), + basename="landscape-manager", + ) os.chmod(app, 0o755) call = subprocess.Popen([app]) self.addCleanup(call.terminate) diff -Nru landscape-client-23.02/landscape/client/tests/test_configuration.py landscape-client-23.08/landscape/client/tests/test_configuration.py --- landscape-client-23.02/landscape/client/tests/test_configuration.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/test_configuration.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,50 +1,68 @@ -from __future__ import print_function - -import mock -from functools import partial import os import sys import textwrap import unittest +from functools import partial +from unittest import mock -from twisted.internet.defer import succeed, fail, Deferred +from twisted.internet.defer import Deferred +from twisted.internet.defer import fail +from twisted.internet.defer import succeed from twisted.python.compat import iteritems +from landscape.client.broker.registration import Identity +from landscape.client.broker.registration import RegistrationError +from landscape.client.broker.tests.helpers import BrokerConfigurationHelper +from landscape.client.broker.tests.helpers import RemoteBrokerHelper +from landscape.client.configuration import bootstrap_tree +from landscape.client.configuration import ConfigurationError +from landscape.client.configuration import determine_exit_code +from landscape.client.configuration import done +from 
landscape.client.configuration import exchange_failure +from landscape.client.configuration import EXIT_NOT_REGISTERED +from landscape.client.configuration import failure +from landscape.client.configuration import got_connection +from landscape.client.configuration import got_error +from landscape.client.configuration import handle_registration_errors +from landscape.client.configuration import ImportOptionError +from landscape.client.configuration import is_registered +from landscape.client.configuration import LandscapeSetupConfiguration +from landscape.client.configuration import LandscapeSetupScript +from landscape.client.configuration import main +from landscape.client.configuration import print_text +from landscape.client.configuration import prompt_yes_no +from landscape.client.configuration import register +from landscape.client.configuration import registration_info_text +from landscape.client.configuration import report_registration_outcome +from landscape.client.configuration import setup +from landscape.client.configuration import show_help +from landscape.client.configuration import store_public_key_data +from landscape.client.configuration import success +from landscape.client.serviceconfig import ServiceConfigException +from landscape.client.tests.helpers import FakeBrokerServiceHelper +from landscape.client.tests.helpers import LandscapeTest +from landscape.lib.amp import MethodCallError from landscape.lib.compat import ConfigParser from landscape.lib.compat import StringIO -from landscape.client.broker.registration import RegistrationError, Identity -from landscape.client.broker.tests.helpers import ( - RemoteBrokerHelper, BrokerConfigurationHelper) -from landscape.client.configuration import ( - print_text, LandscapeSetupScript, LandscapeSetupConfiguration, - register, setup, main, setup_init_script_and_start_client, - ConfigurationError, prompt_yes_no, show_help, - ImportOptionError, store_public_key_data, - bootstrap_tree, got_connection, success, failure, exchange_failure, - handle_registration_errors, done, got_error, report_registration_outcome, - determine_exit_code, is_registered, registration_info_text, - EXIT_NOT_REGISTERED) -from landscape.lib.amp import MethodCallError -from landscape.lib.fetch import HTTPCodeError, PyCurlError +from landscape.lib.fetch import HTTPCodeError +from landscape.lib.fetch import PyCurlError from landscape.lib.fs import read_binary_file from landscape.lib.persist import Persist from landscape.lib.testing import EnvironSaverHelper -from landscape.client.sysvconfig import ProcessError -from landscape.client.tests.helpers import ( - LandscapeTest, FakeBrokerServiceHelper) class LandscapeConfigurationTest(LandscapeTest): - def get_config(self, args, data_path=None): if data_path is None: data_path = os.path.join(self.makeDir(), "client") if "--config" not in args and "-c" not in args: - filename = self.makeFile(""" + filename = self.makeFile( + """ [client] url = https://landscape.canonical.com/message-system -""") +""", + ) args.extend(["--config", filename, "--data-path", data_path]) config = LandscapeSetupConfiguration() config.load(args) @@ -52,7 +70,6 @@ class SuccessTests(unittest.TestCase): - def test_success(self): """The success handler records the success.""" results = [] @@ -61,7 +78,6 @@ class FailureTests(unittest.TestCase): - def test_failure(self): """The failure handler records the failure and returns non-zero.""" results = [] @@ -70,13 +86,14 @@ class ExchangeFailureTests(unittest.TestCase): - def 
test_exchange_failure_ssl(self): """The exchange_failure() handler records whether or not the failure involved SSL or not and returns non-zero.""" results = [] - self.assertNotEqual(0, - exchange_failure(results.append, ssl_error=True)) + self.assertNotEqual( + 0, + exchange_failure(results.append, ssl_error=True), + ) self.assertEqual(["ssl-error"], results) def test_exchange_failure_non_ssl(self): @@ -85,19 +102,21 @@ involved SSL or not and returns non-zero. """ results = [] - self.assertNotEqual(0, - exchange_failure(results.append, ssl_error=False)) + self.assertNotEqual( + 0, + exchange_failure(results.append, ssl_error=False), + ) self.assertEqual(["non-ssl-error"], results) class HandleRegistrationErrorsTests(unittest.TestCase): - def test_handle_registration_errors_traps(self): """ The handle_registration_errors() function traps RegistrationError and MethodCallError errors. """ - class FauxFailure(object): + + class FauxFailure: def trap(self, *trapped): self.trapped_exceptions = trapped @@ -110,17 +129,23 @@ self.assertNotEqual( 0, handle_registration_errors( - add_result, faux_failure, faux_connector)) + add_result, + faux_failure, + faux_connector, + ), + ) self.assertTrue( [RegistrationError, MethodCallError], - faux_failure.trapped_exceptions) + faux_failure.trapped_exceptions, + ) def test_handle_registration_errors_disconnects_cleanly(self): """ The handle_registration_errors function disconnects the broker connector cleanly. """ - class FauxFailure(object): + + class FauxFailure: def trap(self, *trapped): pass @@ -133,7 +158,11 @@ self.assertNotEqual( 0, handle_registration_errors( - add_result, faux_failure, faux_connector)) + add_result, + faux_failure, + faux_connector, + ), + ) self.assertTrue(faux_connector.was_disconnected) def test_handle_registration_errors_as_errback(self): @@ -156,23 +185,25 @@ deferred = Deferred() deferred.addCallback(i_raise) deferred.addErrback( - partial(handle_registration_errors, add_result), faux_connector) + partial(handle_registration_errors, add_result), + faux_connector, + ) deferred.callback("") # This kicks off the callback chain. self.assertEqual([True], calls) class DoneTests(unittest.TestCase): - def test_done(self): """The done() function handles cleaning up.""" - class FauxConnector(object): + + class FauxConnector: was_disconnected = False def disconnect(self): self.was_disconnected = True - class FauxReactor(object): + class FauxReactor: was_stopped = False def stop(self): @@ -187,12 +218,11 @@ class GotErrorTests(unittest.TestCase): - def test_got_error(self): """The got_error() function handles displaying errors and exiting.""" - class FauxFailure(object): - def getTraceback(self): + class FauxFailure: + def getTraceback(self): # noqa: N802 return "traceback" results = [] @@ -203,16 +233,19 @@ mock_reactor = mock.Mock() - got_error(FauxFailure(), reactor=mock_reactor, - add_result=results.append, print=faux_print) + got_error( + FauxFailure(), + reactor=mock_reactor, + add_result=results.append, + print=faux_print, + ) mock_reactor.stop.assert_called_once_with() self.assertIsInstance(results[0], SystemExit) - self.assertEqual([('traceback', sys.stderr)], printed) + self.assertEqual([("traceback", sys.stderr)], printed) class PrintTextTest(LandscapeTest): - @mock.patch("sys.stdout", new_callable=StringIO) def test_default(self, stdout): print_text("Hi!") @@ -230,24 +263,27 @@ class PromptYesNoTest(unittest.TestCase): - def test_prompt_yes_no(self): """ prompt_yes_no prompts a question and returns a boolean with the answer. 
""" - comparisons = [("Y", True), - ("y", True), - ("yEs", True), - ("YES", True), - ("n", False), - ("N", False), - ("No", False), - ("no", False), - ("", True)] + comparisons = [ + ("Y", True), + ("y", True), + ("yEs", True), + ("YES", True), + ("n", False), + ("N", False), + ("No", False), + ("no", False), + ("", True), + ] for input_string, result in comparisons: - with mock.patch("landscape.client.configuration.input", - return_value=input_string) as mock_input: + with mock.patch( + "landscape.client.configuration.input", + return_value=input_string, + ) as mock_input: prompt_yes_no("Foo") mock_input.assert_called_once_with("Foo [Y/n]: ") @@ -266,7 +302,6 @@ class ShowHelpTest(unittest.TestCase): - @mock.patch("landscape.client.configuration.print_text") def test_show_help(self, mock_print_text): show_help("\n\n \n Hello \n \n world! \n \n\n") @@ -274,13 +309,13 @@ class LandscapeSetupScriptTest(LandscapeTest): - def setUp(self): - super(LandscapeSetupScriptTest, self).setUp() + super().setUp() self.config_filename = self.makeFile() class MyLandscapeSetupConfiguration(LandscapeSetupConfiguration): default_config_filenames = [self.config_filename] + self.config = MyLandscapeSetupConfiguration() self.script = LandscapeSetupScript(self.config) @@ -298,13 +333,16 @@ mock_input.assert_called_once_with("Message [default]: ") self.assertEqual(self.config.computer_title, "default") - @mock.patch("landscape.client.configuration.input", - side_effect=("", "Desktop")) + @mock.patch( + "landscape.client.configuration.input", + side_effect=("", "Desktop"), + ) @mock.patch("landscape.client.configuration.show_help") def test_prompt_with_required(self, mock_show_help, mock_input): self.script.prompt("computer_title", "Message", True) mock_show_help.assert_called_once_with( - "This option is required to configure Landscape.") + "This option is required to configure Landscape.", + ) calls = [mock.call("Message: "), mock.call("Message: ")] mock_input.assert_has_calls(calls) @@ -318,6 +356,58 @@ mock_input.assert_called_once_with("Message [Desktop]: ") self.assertEqual(self.config.computer_title, "Desktop") + @mock.patch( + "landscape.client.configuration.input", + return_value="landscape.hello.com", + ) + @mock.patch( + "landscape.client.configuration.prompt_yes_no", + return_value=True, + ) + @mock.patch("landscape.client.configuration.show_help") + def test_prompt_landscape_edition( + self, + mock_help, + prompt_yes_no, + mock_input, + ): + self.script.query_landscape_edition() + self.script.query_account_name() + self.assertEqual( + self.config.ping_url, + "http://landscape.hello.com/ping", + ) + self.assertEqual( + self.config.url, + "https://landscape.hello.com/message-system", + ) + self.assertEqual(self.config.account_name, "standalone") + + @mock.patch( + "landscape.client.configuration.input", + return_value="landscape.hello.com", + ) + @mock.patch( + "landscape.client.configuration.prompt_yes_no", + return_value=False, + ) + @mock.patch("landscape.client.configuration.show_help") + def test_prompt_landscape_edition_saas( + self, + mock_help, + prompt_yes_no, + mock_input, + ): + self.script.query_landscape_edition() + self.assertEqual( + self.config.ping_url, + "http://landscape.canonical.com/ping", + ) + self.assertEqual( + self.config.url, + "https://landscape.canonical.com/message-system", + ) + @mock.patch("landscape.client.configuration.input", return_value="Yay") def test_prompt_for_unknown_variable(self, mock_input): """ @@ -330,8 +420,10 @@ 
mock_input.assert_called_once_with("Variable: ") self.assertEqual(self.config.variable, "Yay") - @mock.patch("landscape.client.configuration.getpass.getpass", - side_effect=("password", "password")) + @mock.patch( + "landscape.client.configuration.getpass.getpass", + side_effect=("password", "password"), + ) def test_password_prompt_simple_matching(self, mock_getpass): self.script.password_prompt("registration_key", "Password") calls = [mock.call("Password: "), mock.call("Please confirm: ")] @@ -339,45 +431,52 @@ self.assertEqual(self.config.registration_key, "password") @mock.patch("landscape.client.configuration.show_help") - @mock.patch("landscape.client.configuration.getpass.getpass", - side_effect=("password", "", "password", "password")) - def test_password_prompt_simple_non_matching(self, mock_getpass, - mock_show_help): + @mock.patch( + "landscape.client.configuration.getpass.getpass", + side_effect=("password", "", "password", "password"), + ) + def test_password_prompt_simple_non_matching( + self, + mock_getpass, + mock_show_help, + ): self.script.password_prompt("registration_key", "Password") - calls = [mock.call("Password: "), mock.call("Please confirm: "), - mock.call("Password: "), mock.call("Please confirm: ")] + calls = [ + mock.call("Password: "), + mock.call("Please confirm: "), + mock.call("Password: "), + mock.call("Please confirm: "), + ] mock_getpass.assert_has_calls(calls) mock_show_help.assert_called_once_with("Keys must match.") self.assertEqual(self.config.registration_key, "password") @mock.patch("landscape.client.configuration.show_help") - @mock.patch("landscape.client.configuration.getpass.getpass", - side_effect=("", "password", "password")) - def test_password_prompt_simple_matching_required(self, mock_getpass, - mock_show_help): + @mock.patch( + "landscape.client.configuration.getpass.getpass", + side_effect=("", "password", "password"), + ) + def test_password_prompt_simple_matching_required( + self, + mock_getpass, + mock_show_help, + ): self.script.password_prompt("registration_key", "Password", True) - calls = [mock.call("Password: "), mock.call("Password: "), - mock.call("Please confirm: ")] + calls = [ + mock.call("Password: "), + mock.call("Password: "), + mock.call("Please confirm: "), + ] mock_getpass.assert_has_calls(calls) mock_show_help.assert_called_once_with( - "This option is required to configure Landscape.") + "This option is required to configure Landscape.", + ) self.assertEqual(self.config.registration_key, "password") - @mock.patch("landscape.client.configuration.show_help") - def test_query_computer_title(self, mock_show_help): - help_snippet = "The computer title you" - self.script.prompt = mock.Mock() - self.script.query_computer_title() - self.script.prompt.assert_called_once_with( - "computer_title", "This computer's title", True) - [call] = mock_show_help.mock_calls - self.assertTrue(call.strip().startswith(help_snippet)) - @mock.patch("landscape.client.configuration.input") - def test_query_computer_title_defined_on_command_line( - self, mock_input): + def test_query_computer_title_defined_on_command_line(self, mock_input): self.config.load_command_line(["-t", "Computer title"]) self.script.query_computer_title() mock_input.assert_not_called() @@ -388,7 +487,10 @@ self.script.prompt = mock.Mock() self.script.query_account_name() self.script.prompt.assert_called_once_with( - "account_name", "Account name", True) + "account_name", + "Account name", + True, + ) [call] = mock_show_help.mock_calls 
self.assertTrue(call.strip().startswith(help_snippet)) @@ -406,13 +508,17 @@ self.script.password_prompt = mock.Mock() self.script.query_registration_key() self.script.password_prompt.assert_called_once_with( - "registration_key", "Account registration key") + "registration_key", + "(Optional) Registration Key", + ) [call] = mock_show_help.mock_calls self.assertTrue(call.strip().startswith(help_snippet)) @mock.patch("landscape.client.configuration.getpass.getpass") def test_query_registration_key_defined_on_command_line( - self, mock_getpass): + self, + mock_getpass, + ): self.config.load_command_line(["-p", "shared-secret"]) self.script.query_registration_key() mock_getpass.assert_not_called() @@ -423,16 +529,24 @@ self.script.prompt = mock.Mock() self.script.query_proxies() - calls = [mock.call("http_proxy", "HTTP proxy URL"), - mock.call("https_proxy", "HTTPS proxy URL")] + calls = [ + mock.call("http_proxy", "HTTP proxy URL"), + mock.call("https_proxy", "HTTPS proxy URL"), + ] self.script.prompt.assert_has_calls(calls) [call] = mock_show_help.mock_calls self.assertTrue(call.strip().startswith(help_snippet)) @mock.patch("landscape.client.configuration.input") def test_query_proxies_defined_on_command_line(self, mock_input): - self.config.load_command_line(["--http-proxy", "localhost:8080", - "--https-proxy", "localhost:8443"]) + self.config.load_command_line( + [ + "--http-proxy", + "localhost:8080", + "--https-proxy", + "localhost:8443", + ], + ) self.script.query_proxies() mock_input.assert_not_called() @@ -453,174 +567,39 @@ self.config.load_command_line(["--https-proxy", "localhost:8443"]) self.script.query_proxies() self.script.prompt.assert_called_once_with( - "http_proxy", "HTTP proxy URL") + "http_proxy", + "HTTP proxy URL", + ) [call] = mock_show_help.mock_calls self.assertTrue(call.strip().startswith(help_snippet)) - @mock.patch("landscape.client.configuration.show_help") - @mock.patch("landscape.client.configuration.prompt_yes_no", - return_value=False) - def test_query_script_plugin_no(self, mock_prompt_yes_no, mock_show_help): - help_snippet = "Landscape has a feature which enables administrators" - - self.script.query_script_plugin() - self.assertEqual(self.config.include_manager_plugins, "") - mock_prompt_yes_no.assert_called_once_with( - "Enable script execution?", default=False) - [call] = mock_show_help.mock_calls - self.assertTrue(call.strip().startswith(help_snippet)) - - @mock.patch("landscape.client.configuration.show_help") - @mock.patch("landscape.client.configuration.prompt_yes_no", - return_value=True) - def test_query_script_plugin_yes(self, mock_prompt_yes_no, mock_show_help): - """ - If the user *does* want script execution, then the script asks which - users to enable it for. 
- """ - help_snippet = "Landscape has a feature which enables administrators" - self.script.prompt = mock.Mock() - - self.script.query_script_plugin() - mock_prompt_yes_no.assert_called_once_with( - "Enable script execution?", default=False) - first_call, second_call = mock_show_help.mock_calls - self.assertTrue(first_call.strip().startswith(help_snippet)) - self.assertTrue(second_call.strip().startswith( - "By default, scripts are restricted")) - - self.script.prompt.assert_called_once_with( - "script_users", "Script users") - self.assertEqual(self.config.include_manager_plugins, - "ScriptExecution") - - @mock.patch("landscape.client.configuration.show_help") - @mock.patch("landscape.client.configuration.prompt_yes_no", - return_value=False) - def test_disable_script_plugin(self, mock_prompt_yes_no, mock_show_help): - """ - Answering NO to enabling the script plugin while it's already enabled - will disable it. - """ - self.config.include_manager_plugins = "ScriptExecution" - help_snippet = "Landscape has a feature which enables administrators" - - self.script.query_script_plugin() - mock_prompt_yes_no.assert_called_once_with( - "Enable script execution?", default=True) - self.assertEqual(self.config.include_manager_plugins, "") - [call] = mock_show_help.mock_calls - self.assertTrue(call.strip().startswith(help_snippet)) - - @mock.patch("landscape.client.configuration.show_help") - @mock.patch("landscape.client.configuration.prompt_yes_no", - return_value=False) - def test_disabling_script_plugin_leaves_existing_inclusions( - self, mock_prompt_yes_no, mock_show_help): - """ - Disabling the script execution plugin doesn't remove other included - plugins. - """ - self.config.include_manager_plugins = "FooPlugin, ScriptExecution" - - self.script.query_script_plugin() - mock_prompt_yes_no.assert_called_once_with( - "Enable script execution?", default=True) - self.assertEqual(self.config.include_manager_plugins, "FooPlugin") - mock_show_help.assert_called_once_with(mock.ANY) - - @mock.patch("landscape.client.configuration.show_help") - @mock.patch("landscape.client.configuration.prompt_yes_no", - return_value=True) - def test_enabling_script_plugin_leaves_existing_inclusions( - self, mock_prompt_yes_no, mock_show_help): - """ - Enabling the script execution plugin doesn't remove other included - plugins. 
- """ - self.config.include_manager_plugins = "FooPlugin" - - self.script.prompt = mock.Mock() - - self.script.query_script_plugin() - mock_prompt_yes_no.assert_called_once_with( - "Enable script execution?", default=False) - - self.script.prompt.assert_called_once_with( - "script_users", "Script users") - self.assertEqual(2, mock_show_help.call_count) - self.assertEqual(self.config.include_manager_plugins, - "FooPlugin, ScriptExecution") - - @mock.patch("landscape.client.configuration.input") - def test_query_script_plugin_defined_on_command_line(self, mock_input): - self.config.load_command_line( - ["--include-manager-plugins", "ScriptExecution", - "--script-users", "root, nobody"]) - self.script.query_script_plugin() - mock_input.assert_not_called() - self.assertEqual(self.config.include_manager_plugins, - "ScriptExecution") - self.assertEqual(self.config.script_users, "root, nobody") - - @mock.patch("landscape.client.configuration.show_help") - @mock.patch("landscape.client.configuration.prompt_yes_no", - return_value=True) - def test_query_script_manager_plugins_defined_on_command_line( - self, mock_prompt_yes_no, mock_show_help): - self.script.prompt = mock.Mock() - - self.config.load_command_line( - ["--include-manager-plugins", "FooPlugin, ScriptExecution"]) - self.script.query_script_plugin() - self.script.prompt.assert_called_once_with( - "script_users", "Script users") - self.assertEqual(2, mock_show_help.call_count) - self.assertEqual(self.config.include_manager_plugins, - "FooPlugin, ScriptExecution") - - @mock.patch("landscape.client.configuration.show_help") - @mock.patch("landscape.client.configuration.prompt_yes_no", - return_value=True) - @mock.patch("landscape.client.configuration.pwd.getpwnam", - return_value=None) - def test_query_script_users_defined_on_command_line(self, mock_getpwnam, - mock_prompt_yes_no, - mock_show_help): - """ - Confirm with the user for users specified for the ScriptPlugin. - """ - self.script.prompt_get_input = mock.Mock(return_value=None) - - self.config.include_manager_plugins = "FooPlugin" - - self.config.load_command_line( - ["--script-users", "root, nobody, landscape"]) - self.script.query_script_plugin() - - mock_getpwnam.assert_called_with("landscape") - mock_prompt_yes_no.assert_called_once_with( - "Enable script execution?", default=False) - self.script.prompt_get_input.assert_called_once_with( - "Script users [root, nobody, landscape]: ", False) - self.assertEqual(2, mock_show_help.call_count) - self.assertEqual(self.config.script_users, - "root, nobody, landscape") - - @mock.patch("landscape.client.configuration.pwd.getpwnam", - side_effect=(None, None, None, KeyError())) + @mock.patch( + "landscape.client.configuration.pwd.getpwnam", + side_effect=(None, None, None, KeyError()), + ) def test_query_script_users_on_command_line_with_unknown_user( - self, mock_getpwnam): + self, + mock_getpwnam, + ): """ If several users are provided on the command line, we verify the users and raise a ConfigurationError if any are unknown on this system. 
""" self.config.load_command_line( - ["--script-users", "root, nobody, landscape, unknown", - "--include-manager-plugins", "ScriptPlugin"]) + [ + "--script-users", + "root, nobody, landscape, unknown", + "--include-manager-plugins", + "ScriptPlugin", + ], + ) self.assertRaises(ConfigurationError, self.script.query_script_plugin) - calls = [mock.call("root"), mock.call("nobody"), - mock.call("landscape"), mock.call("unknown")] + calls = [ + mock.call("root"), + mock.call("nobody"), + mock.call("landscape"), + mock.call("unknown"), + ] mock_getpwnam.assert_has_calls(calls) def test_query_script_users_defined_on_command_line_with_all_user(self): @@ -628,107 +607,68 @@ We shouldn't accept all as a synonym for ALL """ self.config.load_command_line( - ["--script-users", "all", - "--include-manager-plugins", "ScriptPlugin"]) + [ + "--script-users", + "all", + "--include-manager-plugins", + "ScriptPlugin", + ], + ) self.assertRaises(ConfigurationError, self.script.query_script_plugin) - def test_query_script_users_defined_on_command_line_with_ALL_user(self): + def test_query_script_users_defined_on_command_line_with_ALL_user( # noqa: E501,N802 + self, + ): """ ALL is the special marker for all users. """ self.config.load_command_line( - ["--script-users", "ALL", - "--include-manager-plugins", "ScriptPlugin"]) + [ + "--script-users", + "ALL", + "--include-manager-plugins", + "ScriptPlugin", + ], + ) self.script.query_script_plugin() self.assertEqual(self.config.script_users, "ALL") - def test_query_script_users_command_line_with_ALL_and_extra_user(self): + def test_query_script_users_command_line_with_ALL_and_extra_user( # noqa: E501,N802 + self, + ): """ If ALL and additional users are provided as the users on the command line, this should raise an appropriate ConfigurationError. """ self.config.load_command_line( - ["--script-users", "ALL, kevin", - "--include-manager-plugins", "ScriptPlugin"]) + [ + "--script-users", + "ALL, kevin", + "--include-manager-plugins", + "ScriptPlugin", + ], + ) self.assertRaises(ConfigurationError, self.script.query_script_plugin) - @mock.patch("landscape.client.configuration.show_help") - @mock.patch("landscape.client.configuration.prompt_yes_no", - return_value=True) - def test_invalid_user_entered_by_user(self, mock_prompt_yes_no, - mock_show_help): - """ - If an invalid user is entered on the command line the user should be - informed and prompted again. - """ - help_snippet = "Landscape has a feature which enables administrators" - self.script.prompt_get_input = mock.Mock( - side_effect=(u"nonexistent", u"root")) - - self.script.query_script_plugin() - self.assertEqual(self.config.script_users, "root") - first_call, second_call, third_call = mock_show_help.mock_calls - self.assertTrue(first_call.strip().startswith(help_snippet)) - self.assertTrue(second_call.strip().startswith( - "By default, scripts are restricted")) - self.assertTrue(third_call.strip().startswith( - "Unknown system users: nonexistsent")) - - @mock.patch("landscape.client.configuration.show_help") - def test_tags_not_defined_on_command_line(self, mock_show_help): - """ - If tags are not provided, the user should be prompted for them. - """ - help_snippet = ("You may provide tags for this computer e.g. 
" - "server,precise.") - self.script.prompt = mock.Mock() - - self.script.query_tags() - self.script.prompt.assert_called_once_with( - "tags", "Tags", False) - [call] = mock_show_help.mock_calls - self.assertTrue(call.strip().startswith(help_snippet)) - - @mock.patch("landscape.client.configuration.show_help") - @mock.patch("landscape.client.configuration.prompt_yes_no") - def test_invalid_tags_entered_by_user(self, mock_prompt_yes_no, - mock_show_help): - """ - If tags are not provided, the user should be prompted for them, and - they should be valid tags, if not the user should be prompted for them - again. - """ - self.script.prompt_get_input = mock.Mock( - side_effect=(u"", u"london")) - - self.script.query_tags() - first_call, second_call = mock_show_help.mock_calls - self.assertTrue( - first_call.strip().startswith("You may provide tags for this " - "computer e.g. server,precise.")) - self.assertTrue( - second_call.strip().startswith("Tag names may only contain " - "alphanumeric characters.")) - calls = [("Tags: ", False), ("Tags: ", False)] - self.script.prompt_get_input.has_calls(calls) - @mock.patch("landscape.client.configuration.input") def test_tags_defined_on_command_line(self, mock_input): """ Tags defined on the command line can be verified by the user. """ - self.config.load_command_line(["--tags", u"server,london"]) + self.config.load_command_line(["--tags", "server,london"]) self.script.query_tags() - self.assertEqual(self.config.tags, u"server,london") + self.assertEqual(self.config.tags, "server,london") mock_input.assert_not_called() @mock.patch("landscape.client.configuration.input") def test_invalid_tags_defined_on_command_line_raises_error( - self, mock_input): + self, + mock_input, + ): """ Invalid tags on the command line raises a ConfigurationError. """ - self.config.load_command_line(["--tags", u""]) + self.config.load_command_line(["--tags", ""]) self.assertRaises(ConfigurationError, self.script.query_tags) mock_input.assert_not_called() @@ -737,8 +677,10 @@ """ If an access group is not provided, the user should be prompted for it. """ - help_snippet = ("You may provide an access group for this computer " - "e.g. webservers.") + help_snippet = ( + "You may provide an access group for this computer " + "e.g. webservers." + ) self.script.prompt = mock.Mock() self.script.query_access_group() [call] = mock_show_help.mock_calls @@ -750,9 +692,9 @@ When an access group is provided on the command line, do not prompt the user for it. 
""" - self.config.load_command_line(["--access-group", u"webservers"]) + self.config.load_command_line(["--access-group", "webservers"]) self.script.query_access_group() - self.assertEqual(self.config.access_group, u"webservers") + self.assertEqual(self.config.access_group, "webservers") mock_input.assert_not_called() @mock.patch("landscape.client.configuration.input") @@ -762,36 +704,8 @@ self.assertEqual(self.config.stagger_launch, 0.5) mock_input.assert_not_called() - @mock.patch("landscape.client.configuration.show_help") - def test_show_header(self, mock_show_help): - help_snippet = "This script will" - self.script.show_header() - [call] = mock_show_help.mock_calls - self.assertTrue(call.strip().startswith(help_snippet)) - - def test_run(self): - self.script.show_header = mock.Mock() - self.script.query_computer_title = mock.Mock() - self.script.query_account_name = mock.Mock() - self.script.query_registration_key = mock.Mock() - self.script.query_proxies = mock.Mock() - self.script.query_script_plugin = mock.Mock() - self.script.query_access_group = mock.Mock() - self.script.query_tags = mock.Mock() - self.script.run() - - self.script.show_header.assert_called_once_with() - self.script.query_computer_title.assert_called_once_with() - self.script.query_account_name.assert_called_once_with() - self.script.query_registration_key.assert_called_once_with() - self.script.query_proxies.assert_called_once_with() - self.script.query_script_plugin.assert_called_once_with() - self.script.query_access_group.assert_called_once_with() - self.script.query_tags.assert_called_once_with() - class BootstrapTreeTest(LandscapeConfigurationTest): - @mock.patch("os.chmod") def test_bootstrap_tree(self, mock_chmod): """ @@ -819,16 +733,18 @@ helpers = [EnvironSaverHelper] def setUp(self): - super(ConfigurationFunctionsTest, self).setUp() + super().setUp() getuid_patcher = mock.patch("os.getuid", return_value=0) bootstrap_tree_patcher = mock.patch( - "landscape.client.configuration.bootstrap_tree") + "landscape.client.configuration.bootstrap_tree", + ) self.mock_getuid = getuid_patcher.start() self.mock_bootstrap_tree = bootstrap_tree_patcher.start() def cleanup(): getuid_patcher.stop() bootstrap_tree_patcher.stop() + self.addCleanup(cleanup) def get_content(self, config): @@ -838,24 +754,24 @@ try: config.config = config_file config.write() - return open(config.config, "r").read().strip() + "\n" + return open(config.config).read().strip() + "\n" finally: config.config = original_config @mock.patch("landscape.client.configuration.print_text") @mock.patch("landscape.client.configuration.getpass.getpass") @mock.patch("landscape.client.configuration.input") - def test_setup(self, mock_input, mock_getpass, mock_print_text): - filename = self.makeFile("[client]\n" - "computer_title = Old Title\n" - "account_name = Old Name\n" - "registration_key = Old Password\n" - "http_proxy = http://old.proxy\n" - "https_proxy = https://old.proxy\n" - "url = http://url\n" - "include_manager_plugins = ScriptExecution\n" - "access_group = webservers\n" - "tags = london, server") + @mock.patch("landscape.client.configuration.show_help") + def test_setup(self, mock_help, mock_input, mock_getpass, mock_print_text): + filename = self.makeFile( + "[client]\n" + "computer_title = Old Title\n" + "account_name = Old Name\n" + "registration_key = Old Password\n" + "http_proxy = http://old.proxy\n" + "https_proxy = https://old.proxy\n" + "url = http://url\n", + ) def side_effect_input(prompt): fixtures = { @@ -863,22 +779,23 @@ "[Old 
Name]": "New Name", "[http://old.proxy]": "http://new.proxy", "[https://old.proxy]": "https://new.proxy", - "Enable script execution? [Y/n]": "n", - "Access group [webservers]: ": u"databases", - "Tags [london, server]: ": u"glasgow, laptop", + "Will you be using your own " + "Self-Hosted Landscape installation? [y/N]": "n", } for key, value in iteritems(fixtures): if key in prompt: return value - raise KeyError("Couldn't find answer for {}".format(prompt)) + raise KeyError(f"Couldn't find answer for {prompt}") def side_effect_getpass(prompt): - fixtures = {"Account registration key:": "New Password", - "Please confirm:": "New Password"} + fixtures = { + "(Optional) Registration Key:": "New Password", + "Please confirm:": "New Password", + } for key, value in iteritems(fixtures): if key in prompt: return value - raise KeyError("Couldn't find answer for {}".format(prompt)) + raise KeyError(f"Couldn't find answer for {prompt}") mock_input.side_effect = side_effect_input mock_getpass.side_effect = side_effect_getpass @@ -890,66 +807,105 @@ # Reload it to ensure it was written down. config.reload() + self.assertEqual( + "sudo landscape-config " + "--account-name 'New Name' " + "--registration-key HIDDEN " + "--https-proxy https://new.proxy " + "--http-proxy http://new.proxy", + mock_help.mock_calls[-1].args[0], + ) + + proxy_arg = mock_help.mock_calls[-3].args[0] + self.assertIn("https://new.proxy", proxy_arg) + self.assertIn("http://new.proxy", proxy_arg) + + summary_arg = mock_help.mock_calls[-4].args[0] + self.assertIn("Computer's Title: New Title", summary_arg) + self.assertIn("Account Name: New Name", summary_arg) + self.assertIn("Landscape FQDN: landscape.canonical.com", summary_arg) + self.assertIn("Registration Key: True", summary_arg) + self.assertEqual(config.computer_title, "New Title") self.assertEqual(config.account_name, "New Name") self.assertEqual(config.registration_key, "New Password") self.assertEqual(config.http_proxy, "http://new.proxy") self.assertEqual(config.https_proxy, "https://new.proxy") self.assertEqual(config.include_manager_plugins, "") - self.assertEqual(config.access_group, u"databases") - self.assertEqual(config.tags, u"glasgow, laptop") - @mock.patch("landscape.client.configuration.SysVConfig") - def test_silent_setup(self, mock_sysvconfig): + @mock.patch("landscape.client.configuration.ServiceConfig") + def test_silent_setup(self, mock_serviceconfig): """ Only command-line options are used in silent mode and registration is attempted. """ config = self.get_config(["--silent", "-a", "account", "-t", "rex"]) setup(config) - mock_sysvconfig().set_start_on_boot.assert_called_once_with(True) - mock_sysvconfig().restart_landscape.assert_called_once_with() - self.assertConfigEqual(self.get_content(config), """\ + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) + mock_serviceconfig.restart_landscape.assert_called_once_with() + self.assertConfigEqual( + self.get_content(config), + f"""\ [client] computer_title = rex -data_path = %s +data_path = {config.data_path} account_name = account url = https://landscape.canonical.com/message-system -""" % config.data_path) +""", + ) - @mock.patch("landscape.client.configuration.SysVConfig") - def test_silent_setup_no_register(self, mock_sysvconfig): + @mock.patch("landscape.client.configuration.ServiceConfig") + def test_silent_setup_no_register(self, mock_serviceconfig): """ Called with command line options to write a config file but no registration or validation of parameters is attempted. 
""" config = self.get_config(["--silent", "--no-start"]) setup(config) - self.assertConfigEqual(self.get_content(config), """\ + self.assertConfigEqual( + self.get_content(config), + f"""\ [client] -data_path = %s +data_path = {config.data_path} url = https://landscape.canonical.com/message-system -""" % config.data_path) +""", + ) - @mock.patch("landscape.client.configuration.SysVConfig") + @mock.patch("landscape.client.configuration.ServiceConfig") def test_silent_setup_no_register_with_default_preseed_params( - self, mock_sysvconfig): + self, + mock_serviceconfig, + ): """ Make sure that the configuration can be used to write the configuration file after a fresh install. """ - args = ["--silent", "--no-start", - "--computer-title", "", - "--account-name", "", - "--registration-key", "", - "--url", "https://landscape.canonical.com/message-system", - "--exchange-interval", "900", - "--urgent-exchange-interval", "60", - "--ping-url", "http://landscape.canonical.com/ping", - "--ping-interval", "30", - "--http-proxy", "", - "--https-proxy", "", - "--tags", ""] + args = [ + "--silent", + "--no-start", + "--computer-title", + "", + "--account-name", + "", + "--registration-key", + "", + "--url", + "https://landscape.canonical.com/message-system", + "--exchange-interval", + "900", + "--urgent-exchange-interval", + "60", + "--ping-url", + "http://landscape.canonical.com/ping", + "--ping-interval", + "30", + "--http-proxy", + "", + "--https-proxy", + "", + "--tags", + "", + ] config = self.get_config(args) setup(config) self.assertConfigEqual( @@ -957,7 +913,7 @@ "[client]\n" "http_proxy = \n" "tags = \n" - "data_path = %s\n" + f"data_path = {config.data_path}\n" "registration_key = \n" "account_name = \n" "computer_title = \n" @@ -966,111 +922,166 @@ "exchange_interval = 900\n" "ping_interval = 30\n" "ping_url = http://landscape.canonical.com/ping\n" - "urgent_exchange_interval = 60\n" % config.data_path) + "urgent_exchange_interval = 60\n", + ) - @mock.patch("landscape.client.configuration.SysVConfig") - def test_silent_setup_without_computer_title( - self, mock_sysvconfig): + @mock.patch("landscape.client.configuration.ServiceConfig") + def test_silent_setup_unicode_computer_title(self, mock_serviceconfig): + """ + Setup accepts a non-ascii computer title and registration is + attempted. 
+ """ + config = self.get_config(["--silent", "-a", "account", "-t", "mélody"]) + setup(config) + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) + mock_serviceconfig.restart_landscape.assert_called_once_with() + self.assertConfigEqual( + self.get_content(config), + f"""\ +[client] +computer_title = mélody +data_path = {config.data_path} +account_name = account +url = https://landscape.canonical.com/message-system +""", + ) + + @mock.patch("landscape.client.configuration.ServiceConfig") + def test_silent_setup_without_computer_title(self, mock_serviceconfig): """A computer title is required.""" config = self.get_config(["--silent", "-a", "account"]) self.assertRaises(ConfigurationError, setup, config) - @mock.patch("landscape.client.configuration.SysVConfig") - def test_silent_setup_without_account_name( - self, mock_sysvconfig): + @mock.patch("landscape.client.configuration.ServiceConfig") + def test_silent_setup_without_account_name(self, mock_serviceconfig): """An account name is required.""" config = self.get_config(["--silent", "-t", "rex"]) self.assertRaises(ConfigurationError, setup, config) - mock_sysvconfig().set_start_on_boot.assert_called_once_with( - True) + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) @mock.patch("landscape.client.configuration.input") - @mock.patch("landscape.client.configuration.SysVConfig") + @mock.patch("landscape.client.configuration.ServiceConfig") def test_silent_script_users_imply_script_execution_plugin( - self, mock_sysvconfig, mock_input): + self, + mock_serviceconfig, + mock_input, + ): """ If C{--script-users} is specified, without C{ScriptExecution} in the list of manager plugins, it will be automatically added. """ - filename = self.makeFile(""" + filename = self.makeFile( + """ [client] url = https://localhost:8080/message-system bus = session -""") +""", + ) - config = self.get_config(["--config", filename, "--silent", - "-a", "account", "-t", "rex", - "--script-users", "root, nobody"]) - mock_sysvconfig().restart_landscape.return_value = True + config = self.get_config( + [ + "--config", + filename, + "--silent", + "-a", + "account", + "-t", + "rex", + "--script-users", + "root, nobody", + ], + ) + mock_serviceconfig.restart_landscape.return_value = True setup(config) - mock_sysvconfig().set_start_on_boot.assert_called_once_with( - True) - mock_sysvconfig().restart_landscape.assert_called_once_with() + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) + mock_serviceconfig.restart_landscape.assert_called_once_with() mock_input.assert_not_called() parser = ConfigParser() parser.read(filename) self.assertEqual( - {"url": "https://localhost:8080/message-system", - "bus": "session", - "computer_title": "rex", - "include_manager_plugins": "ScriptExecution", - "script_users": "root, nobody", - "account_name": "account"}, - dict(parser.items("client"))) + { + "url": "https://localhost:8080/message-system", + "bus": "session", + "computer_title": "rex", + "include_manager_plugins": "ScriptExecution", + "script_users": "root, nobody", + "account_name": "account", + }, + dict(parser.items("client")), + ) - @mock.patch("landscape.client.configuration.SysVConfig") - def test_silent_script_users_with_all_user(self, mock_sysvconfig): + @mock.patch("landscape.client.configuration.ServiceConfig") + def test_silent_script_users_with_all_user(self, mock_serviceconfig): """ In silent mode, we shouldn't accept invalid users, it should raise a configuration error. 
""" config = self.get_config( - ["--script-users", "all", - "--include-manager-plugins", "ScriptPlugin", - "-a", "account", - "-t", "rex", - "--silent"]) + [ + "--script-users", + "all", + "--include-manager-plugins", + "ScriptPlugin", + "-a", + "account", + "-t", + "rex", + "--silent", + ], + ) self.assertRaises(ConfigurationError, setup, config) - mock_sysvconfig().set_start_on_boot.assert_called_once_with( - True) + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) - @mock.patch("landscape.client.configuration.SysVConfig") - def test_silent_setup_with_ping_url(self, mock_sysvconfig): - mock_sysvconfig().restart_landscape.return_value = True - filename = self.makeFile(""" + @mock.patch("landscape.client.configuration.ServiceConfig") + def test_silent_setup_with_ping_url(self, mock_serviceconfig): + mock_serviceconfig.restart_landscape.return_value = True + filename = self.makeFile( + """ [client] ping_url = http://landscape.canonical.com/ping registration_key = shared-secret log_level = debug random_key = random_value -""") +""", + ) - config = self.get_config(["--config", filename, "--silent", - "-a", "account", "-t", "rex", - "--ping-url", "http://localhost/ping"]) + config = self.get_config( + [ + "--config", + filename, + "--silent", + "-a", + "account", + "-t", + "rex", + "--ping-url", + "http://localhost/ping", + ], + ) setup(config) - mock_sysvconfig().set_start_on_boot.assert_called_once_with( - True) - mock_sysvconfig().restart_landscape.assert_called_once_with() + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) + mock_serviceconfig.restart_landscape.assert_called_once_with() parser = ConfigParser() parser.read(filename) self.assertEqual( - {"log_level": "debug", - "registration_key": "shared-secret", - "ping_url": "http://localhost/ping", - "random_key": "random_value", - "computer_title": "rex", - "account_name": "account"}, - dict(parser.items("client"))) + { + "log_level": "debug", + "registration_key": "shared-secret", + "ping_url": "http://localhost/ping", + "random_key": "random_value", + "computer_title": "rex", + "account_name": "account", + }, + dict(parser.items("client")), + ) @mock.patch("landscape.client.configuration.LandscapeSetupScript") def test_setup_with_proxies_from_environment(self, mock_setup_script): os.environ["http_proxy"] = "http://environ" os.environ["https_proxy"] = "https://environ" - filename = self.makeFile("[client]\n" - "url = http://url\n") + filename = self.makeFile("[client]\n" "url = http://url\n") config = self.get_config(["--no-start", "--config", filename]) setup(config) @@ -1083,9 +1094,11 @@ self.assertEqual(config.http_proxy, "http://environ") self.assertEqual(config.https_proxy, "https://environ") - @mock.patch("landscape.client.configuration.SysVConfig") + @mock.patch("landscape.client.configuration.ServiceConfig") def test_silent_setup_with_proxies_from_environment( - self, mock_sysvconfig): + self, + mock_serviceconfig, + ): """ Only command-line options are used in silent mode and registration is attempted. 
@@ -1093,35 +1106,45 @@ os.environ["http_proxy"] = "http://environ" os.environ["https_proxy"] = "https://environ" - filename = self.makeFile(""" + filename = self.makeFile( + """ [client] registration_key = shared-secret -""") - config = self.get_config(["--config", filename, "--silent", - "-a", "account", "-t", "rex"]) +""", + ) + config = self.get_config( + ["--config", filename, "--silent", "-a", "account", "-t", "rex"], + ) setup(config) - mock_sysvconfig().set_start_on_boot.assert_called_once_with(True) - mock_sysvconfig().restart_landscape.assert_called_once_with() + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) + mock_serviceconfig.restart_landscape.assert_called_once_with() parser = ConfigParser() parser.read(filename) self.assertEqual( - {"registration_key": "shared-secret", - "http_proxy": "http://environ", - "https_proxy": "https://environ", - "computer_title": "rex", - "account_name": "account"}, - dict(parser.items("client"))) + { + "registration_key": "shared-secret", + "http_proxy": "http://environ", + "https_proxy": "https://environ", + "computer_title": "rex", + "account_name": "account", + }, + dict(parser.items("client")), + ) @mock.patch("landscape.client.configuration.LandscapeSetupScript") def test_setup_prefers_proxies_from_config_over_environment( - self, mock_setup_script): + self, + mock_setup_script, + ): os.environ["http_proxy"] = "http://environ" os.environ["https_proxy"] = "https://environ" - filename = self.makeFile("[client]\n" - "http_proxy = http://config\n" - "https_proxy = https://config\n" - "url = http://url\n") + filename = self.makeFile( + "[client]\n" + "http_proxy = http://config\n" + "https_proxy = https://config\n" + "url = http://url\n", + ) config = self.get_config(["--no-start", "--config", filename]) setup(config) @@ -1136,15 +1159,17 @@ @mock.patch("landscape.client.configuration.input", return_value="n") @mock.patch("landscape.client.configuration.register") @mock.patch("landscape.client.configuration.setup") - def test_main_no_registration( - self, mock_setup, mock_register, mock_input): + def test_main_no_registration(self, mock_setup, mock_register, mock_input): main(["-c", self.make_working_config()], print=noop_print) mock_register.assert_not_called() mock_input.assert_called_once_with( - "\nRequest a new registration for this computer now? [Y/n]: ") + "\nRequest a new registration for this computer now? 
[Y/n]: ", + ) - @mock.patch("landscape.client.configuration.register", - return_value="success") + @mock.patch( + "landscape.client.configuration.register", + return_value="success", + ) @mock.patch("landscape.client.configuration.setup") def test_main_silent(self, mock_setup, mock_register): """ @@ -1155,22 +1180,31 @@ "[client]\n" "computer_title = Old Title\n" "account_name = Old Name\n" - "registration_key = Old Password\n" - ) + "registration_key = Old Password\n", + ) exception = self.assertRaises( - SystemExit, main, ["-c", config_filename, "--silent"], - print=noop_print) + SystemExit, + main, + ["-c", config_filename, "--silent"], + print=noop_print, + ) self.assertEqual(0, exception.code) mock_setup.assert_called_once_with(mock.ANY) mock_register.assert_called_once_with(mock.ANY, mock.ANY) @mock.patch("landscape.client.configuration.input", return_value="y") - @mock.patch("landscape.client.configuration.register", - return_value="success") + @mock.patch( + "landscape.client.configuration.register", + return_value="success", + ) @mock.patch("landscape.client.configuration.setup") def test_main_user_interaction_success( - self, mock_setup, mock_register, mock_input): + self, + mock_setup, + mock_register, + mock_input, + ): """The successful result of register() is communicated to the user.""" printed = [] @@ -1178,24 +1212,36 @@ printed.append((string, file)) exception = self.assertRaises( - SystemExit, main, ["-c", self.make_working_config()], - print=faux_print) + SystemExit, + main, + ["-c", self.make_working_config()], + print=faux_print, + ) self.assertEqual(0, exception.code) mock_setup.assert_called_once_with(mock.ANY) mock_register.assert_called_once_with(mock.ANY, mock.ANY) mock_input.assert_called_once_with( - "\nRequest a new registration for this computer now? [Y/n]: ") + "\nRequest a new registration for this computer now? [Y/n]: ", + ) self.assertEqual( - [("Please wait...", sys.stdout), - ("System successfully registered.", sys.stdout)], - printed) + [ + ("Registration request sent successfully.", sys.stdout), + ], + printed, + ) @mock.patch("landscape.client.configuration.input", return_value="y") - @mock.patch("landscape.client.configuration.register", - return_value="unknown-account") + @mock.patch( + "landscape.client.configuration.register", + return_value="unknown-account", + ) @mock.patch("landscape.client.configuration.setup") def test_main_user_interaction_failure( - self, mock_setup, mock_register, mock_input): + self, + mock_setup, + mock_register, + mock_input, + ): """The failed result of register() is communicated to the user.""" printed = [] @@ -1203,52 +1249,74 @@ printed.append((string, file)) exception = self.assertRaises( - SystemExit, main, ["-c", self.make_working_config()], - print=faux_print) + SystemExit, + main, + ["-c", self.make_working_config()], + print=faux_print, + ) self.assertEqual(2, exception.code) mock_setup.assert_called_once_with(mock.ANY) mock_register.assert_called_once_with(mock.ANY, mock.ANY) mock_input.assert_called_once_with( - "\nRequest a new registration for this computer now? [Y/n]: ") + "\nRequest a new registration for this computer now? [Y/n]: ", + ) # Note that the error is output via sys.stderr. 
self.assertEqual( - [("Please wait...", sys.stdout), - ("Invalid account name or registration key.", sys.stderr)], - printed) + [ + ("Invalid account name or registration key.", sys.stderr), + ], + printed, + ) @mock.patch("landscape.client.configuration.input") - @mock.patch("landscape.client.configuration.register", - return_value="success") + @mock.patch( + "landscape.client.configuration.register", + return_value="success", + ) @mock.patch("landscape.client.configuration.setup") def test_main_user_interaction_success_silent( - self, mock_setup, mock_register, mock_input): - """A successful result is communicated to the user even with --silent. - """ + self, + mock_setup, + mock_register, + mock_input, + ): + """Successful result is communicated to the user even with --silent.""" printed = [] def faux_print(string, file=sys.stdout): printed.append((string, file)) exception = self.assertRaises( - SystemExit, main, ["--silent", "-c", self.make_working_config()], - print=faux_print) + SystemExit, + main, + ["--silent", "-c", self.make_working_config()], + print=faux_print, + ) self.assertEqual(0, exception.code) mock_setup.assert_called_once_with(mock.ANY) mock_register.assert_called_once_with(mock.ANY, mock.ANY) mock_input.assert_not_called() self.assertEqual( - [("Please wait...", sys.stdout), - ("System successfully registered.", sys.stdout)], - printed) + [ + ("Registration request sent successfully.", sys.stdout), + ], + printed, + ) @mock.patch("landscape.client.configuration.input") - @mock.patch("landscape.client.configuration.register", - return_value="unknown-account") + @mock.patch( + "landscape.client.configuration.register", + return_value="unknown-account", + ) @mock.patch("landscape.client.configuration.setup") def test_main_user_interaction_failure_silent( - self, mock_setup, mock_register, mock_input): + self, + mock_setup, + mock_register, + mock_input, + ): """ A failure result is communicated to the user even with --silent. """ @@ -1258,134 +1326,156 @@ printed.append((string, file)) exception = self.assertRaises( - SystemExit, main, ["--silent", "-c", self.make_working_config()], - print=faux_print) + SystemExit, + main, + ["--silent", "-c", self.make_working_config()], + print=faux_print, + ) self.assertEqual(2, exception.code) mock_setup.assert_called_once_with(mock.ANY) mock_register.assert_called_once_with(mock.ANY, mock.ANY) mock_input.assert_not_called() # Note that the error is output via sys.stderr. 
self.assertEqual( - [("Please wait...", sys.stdout), - ("Invalid account name or registration key.", sys.stderr)], - printed) + [ + ("Invalid account name or registration key.", sys.stderr), + ], + printed, + ) def make_working_config(self): data_path = self.makeFile() - return self.makeFile("[client]\n" - "computer_title = Old Title\n" - "account_name = Old Name\n" - "registration_key = Old Password\n" - "http_proxy = http://old.proxy\n" - "https_proxy = https://old.proxy\n" - "data_path = {}\n" - "url = http://url\n".format(data_path)) + return self.makeFile( + "[client]\n" + "computer_title = Old Title\n" + "account_name = Old Name\n" + "registration_key = Old Password\n" + "http_proxy = http://old.proxy\n" + "https_proxy = https://old.proxy\n" + "data_path = {}\n" + "url = http://url\n".format(data_path), + ) @mock.patch("landscape.client.configuration.input", return_value="") @mock.patch("landscape.client.configuration.register") @mock.patch("landscape.client.configuration.LandscapeSetupScript") - @mock.patch("landscape.client.configuration.SysVConfig") + @mock.patch("landscape.client.configuration.ServiceConfig") def test_register( - self, mock_sysvconfig, mock_setup_script, mock_register, - mock_input): - mock_sysvconfig().is_configured_to_run.return_value = False + self, + mock_serviceconfig, + mock_setup_script, + mock_register, + mock_input, + ): + mock_serviceconfig.is_configured_to_run.return_value = False self.assertRaises( - SystemExit, main, ["--config", self.make_working_config()], - print=noop_print) - mock_sysvconfig().is_configured_to_run.assert_called_once_with() - mock_sysvconfig().set_start_on_boot.assert_called_once_with(True) - mock_sysvconfig().restart_landscape.assert_called_once_with() + SystemExit, + main, + ["--config", self.make_working_config()], + print=noop_print, + ) + mock_serviceconfig.is_configured_to_run.assert_called_once_with() + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) + mock_serviceconfig.restart_landscape.assert_called_once_with() mock_setup_script().run.assert_called_once_with() mock_register.assert_called_once_with(mock.ANY, mock.ANY) - mock_input.assert_any_call( - "\nThe Landscape client must be started " - "on boot to operate correctly.\n\n" - "Start Landscape client on boot? [Y/n]: ") mock_input.assert_called_with( - "\nRequest a new registration for this computer now? [Y/n]: ") + "\nRequest a new registration for this computer now? [Y/n]: ", + ) @mock.patch("landscape.client.configuration.print_text") - @mock.patch("landscape.client.configuration.SysVConfig") + @mock.patch("landscape.client.configuration.ServiceConfig") def test_errors_from_restart_landscape( - self, mock_sysvconfig, mock_print_text): + self, + mock_serviceconfig, + mock_print_text, + ): """ If a ProcessError exception is raised from restart_landscape (because the client failed to be restarted), an informative message is printed and the script exits. 
""" - mock_sysvconfig().restart_landscape.side_effect = ProcessError() + mock_serviceconfig.restart_landscape.side_effect = ( + ServiceConfigException("Couldn't restart the Landscape client.") + ) config = self.get_config(["--silent", "-a", "account", "-t", "rex"]) system_exit = self.assertRaises(SystemExit, setup, config) self.assertEqual(system_exit.code, 2) - mock_sysvconfig().set_start_on_boot.assert_called_once_with(True) - mock_sysvconfig().restart_landscape.assert_called_once_with() + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) + mock_serviceconfig.restart_landscape.assert_called_once_with() mock_print_text.assert_any_call( - "Couldn't restart the Landscape client.", error=True) + "Couldn't restart the Landscape client.", + error=True, + ) mock_print_text.assert_called_with( "This machine will be registered with the provided details when " - "the client runs.", error=True) + "the client runs.", + error=True, + ) @mock.patch("landscape.client.configuration.print_text") - @mock.patch("landscape.client.configuration.SysVConfig") + @mock.patch("landscape.client.configuration.ServiceConfig") def test_errors_from_restart_landscape_ok_no_register( - self, mock_sysvconfig, mock_print_text): + self, + mock_serviceconfig, + mock_print_text, + ): """ Exit code 0 will be returned if the client fails to be restarted and --ok-no-register was passed. """ - mock_sysvconfig().restart_landscape.side_effect = ProcessError() + mock_serviceconfig.restart_landscape.side_effect = ( + ServiceConfigException("Couldn't restart the Landscape client.") + ) - config = self.get_config(["--silent", "-a", "account", "-t", "rex", - "--ok-no-register"]) + config = self.get_config( + ["--silent", "-a", "account", "-t", "rex", "--ok-no-register"], + ) system_exit = self.assertRaises(SystemExit, setup, config) self.assertEqual(system_exit.code, 0) - mock_sysvconfig().set_start_on_boot.assert_called_once_with(True) - mock_sysvconfig().restart_landscape.assert_called_once_with() + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) + mock_serviceconfig.restart_landscape.assert_called_once_with() mock_print_text.assert_any_call( - "Couldn't restart the Landscape client.", error=True) + "Couldn't restart the Landscape client.", + error=True, + ) mock_print_text.assert_called_with( "This machine will be registered with the provided details when " - "the client runs.", error=True) + "the client runs.", + error=True, + ) @mock.patch("landscape.client.configuration.input", return_value="") @mock.patch("landscape.client.configuration.register") @mock.patch("landscape.client.configuration.setup") - def test_main_with_register( - self, mock_setup, mock_register, mock_input): - self.assertRaises(SystemExit, main, ["-c", self.make_working_config()], - print=noop_print) + def test_main_with_register(self, mock_setup, mock_register, mock_input): + self.assertRaises( + SystemExit, + main, + ["-c", self.make_working_config()], + print=noop_print, + ) mock_setup.assert_called_once_with(mock.ANY) mock_register.assert_called_once_with(mock.ANY, mock.ANY) mock_input.assert_called_once_with( - "\nRequest a new registration for this computer now? 
[Y/n]: ") - - @mock.patch("landscape.client.configuration.SysVConfig") - def test_setup_init_script_and_start_client( - self, mock_sysvconfig): - setup_init_script_and_start_client() - mock_sysvconfig().set_start_on_boot.assert_called_once_with(True) - - @mock.patch("landscape.client.configuration.input") - @mock.patch("landscape.client.configuration.SysVConfig") - def test_setup_init_script_and_start_client_silent( - self, mock_sysvconfig, mock_input): - setup_init_script_and_start_client() - mock_input.assert_not_called() - mock_sysvconfig().set_start_on_boot.assert_called_once_with(True) + "\nRequest a new registration for this computer now? [Y/n]: ", + ) @mock.patch("landscape.client.configuration.input") @mock.patch("landscape.client.configuration.register") @mock.patch("landscape.client.configuration.setup") - def test_register_silent( - self, mock_setup, mock_register, mock_input): + def test_register_silent(self, mock_setup, mock_register, mock_input): """ Silent registration uses specified configuration to attempt a registration with the server. """ self.assertRaises( - SystemExit, main, ["--silent", "-c", self.make_working_config()], - print=noop_print) + SystemExit, + main, + ["--silent", "-c", self.make_working_config()], + print=noop_print, + ) mock_setup.assert_called_once_with(mock.ANY) mock_register.assert_called_once_with(mock.ANY, mock.ANY) mock_input.assert_not_called() @@ -1393,24 +1483,27 @@ @mock.patch("landscape.client.configuration.input") @mock.patch("landscape.client.configuration.register") @mock.patch( - "landscape.client.configuration.stop_client_and_disable_init_script") - def test_disable( - self, mock_stop_client, mock_register, mock_input): + "landscape.client.configuration.stop_client_and_disable_init_script", + ) + def test_disable(self, mock_stop_client, mock_register, mock_input): main(["--disable", "-c", self.make_working_config()]) mock_stop_client.assert_called_once_with() mock_register.assert_not_called() mock_input.assert_not_called() - @mock.patch("landscape.client.configuration.SysVConfig") - def test_stop_client_and_disable_init_scripts(self, mock_sysvconfig): + @mock.patch("landscape.client.configuration.ServiceConfig") + def test_stop_client_and_disable_init_scripts(self, mock_serviceconfig): main(["--disable", "-c", self.make_working_config()]) - mock_sysvconfig().set_start_on_boot.assert_called_once_with(False) - mock_sysvconfig().stop_landscape.assert_called_once_with() + mock_serviceconfig.set_start_on_boot.assert_called_once_with(False) + mock_serviceconfig.stop_landscape.assert_called_once_with() def test_non_root(self): self.mock_getuid.return_value = 1000 - sys_exit = self.assertRaises(SystemExit, - main, ["-c", self.make_working_config()]) + sys_exit = self.assertRaises( + SystemExit, + main, + ["-c", self.make_working_config()], + ) self.mock_getuid.assert_called_once_with() self.assertIn("landscape-config must be run as root", str(sys_exit)) @@ -1420,7 +1513,9 @@ self.mock_getuid.return_value = 1000 self.assertRaises(SystemExit, main, ["--help"]) self.assertIn( - "show this help message and exit", mock_stdout.getvalue()) + "show this help message and exit", + mock_stdout.getvalue(), + ) @mock.patch("sys.stdout", new_callable=StringIO) def test_main_with_help_and_non_root_short(self, mock_stdout): @@ -1428,10 +1523,12 @@ self.mock_getuid.return_value = 1000 self.assertRaises(SystemExit, main, ["-h"]) self.assertIn( - "show this help message and exit", mock_stdout.getvalue()) + "show this help message and exit", + 
mock_stdout.getvalue(), + ) - @mock.patch("landscape.client.configuration.SysVConfig") - def test_import_from_file(self, mock_sysvconfig): + @mock.patch("landscape.client.configuration.ServiceConfig") + def test_import_from_file(self, mock_serviceconfig): configuration = ( "[client]\n" "computer_title = New Title\n" @@ -1439,29 +1536,43 @@ "registration_key = New Password\n" "http_proxy = http://new.proxy\n" "https_proxy = https://new.proxy\n" - "url = http://new.url\n") + "url = http://new.url\n" + ) - import_filename = self.makeFile(configuration, - basename="import_config") + import_filename = self.makeFile( + configuration, + basename="import_config", + ) config_filename = self.makeFile("", basename="final_config") - config = self.get_config(["--config", config_filename, "--silent", - "--import", import_filename]) + config = self.get_config( + [ + "--config", + config_filename, + "--silent", + "--import", + import_filename, + ], + ) setup(config) - mock_sysvconfig().set_start_on_boot.assert_called_once_with(True) - mock_sysvconfig().restart_landscape.assert_called_once_with() + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) + mock_serviceconfig.restart_landscape.assert_called_once_with() options = ConfigParser() options.read(config_filename) - self.assertEqual(dict(options.items("client")), - {"computer_title": "New Title", - "account_name": "New Name", - "registration_key": "New Password", - "http_proxy": "http://new.proxy", - "https_proxy": "https://new.proxy", - "url": "http://new.url"}) + self.assertEqual( + dict(options.items("client")), + { + "computer_title": "New Title", + "account_name": "New Name", + "registration_key": "New Password", + "http_proxy": "http://new.proxy", + "https_proxy": "https://new.proxy", + "url": "http://new.url", + }, + ) def test_import_from_empty_file(self): config_filename = self.makeFile("", basename="final_config") @@ -1469,11 +1580,20 @@ # Use a command line option as well to test the precedence. try: - self.get_config(["--config", config_filename, "--silent", - "--import", import_filename]) + self.get_config( + [ + "--config", + config_filename, + "--silent", + "--import", + import_filename, + ], + ) except ImportOptionError as error: - self.assertEqual(str(error), - "Nothing to import at %s." % import_filename) + self.assertEqual( + str(error), + f"Nothing to import at {import_filename}.", + ) else: self.fail("ImportOptionError not raised") @@ -1483,43 +1603,75 @@ # Use a command line option as well to test the precedence. try: - self.get_config(["--config", config_filename, "--silent", - "--import", import_filename]) + self.get_config( + [ + "--config", + config_filename, + "--silent", + "--import", + import_filename, + ], + ) except ImportOptionError as error: - self.assertEqual(str(error), - "File %s doesn't exist." % import_filename) + self.assertEqual( + str(error), + f"File {import_filename} doesn't exist.", + ) else: self.fail("ImportOptionError not raised") def test_import_from_file_with_empty_client_section(self): old_configuration = "[client]\n" - config_filename = self.makeFile("", old_configuration, - basename="final_config") + config_filename = self.makeFile( + "", + old_configuration, + basename="final_config", + ) import_filename = self.makeFile("", basename="import_config") # Use a command line option as well to test the precedence. 
try: - self.get_config(["--config", config_filename, "--silent", - "--import", import_filename]) + self.get_config( + [ + "--config", + config_filename, + "--silent", + "--import", + import_filename, + ], + ) except ImportOptionError as error: - self.assertEqual(str(error), - "Nothing to import at %s." % import_filename) + self.assertEqual( + str(error), + f"Nothing to import at {import_filename}.", + ) else: self.fail("ImportOptionError not raised") def test_import_from_bogus_file(self): config_filename = self.makeFile("", basename="final_config") - import_filename = self.makeFile("BOGUS!", - basename="import_config") + import_filename = self.makeFile( + "BOGUS!", + basename="import_config", + ) # Use a command line option as well to test the precedence. try: - self.get_config(["--config", config_filename, "--silent", - "--import", import_filename]) + self.get_config( + [ + "--config", + config_filename, + "--silent", + "--import", + import_filename, + ], + ) except ImportOptionError as error: - self.assertIn("Nothing to import at %s" % import_filename, - str(error)) + self.assertIn( + f"Nothing to import at {import_filename}", + str(error), + ) else: self.fail("ImportOptionError not raised") @@ -1529,17 +1681,23 @@ specified file. """ import_filename = self.makeFile( - "[client]\nfoo=bar", basename="import_config") + "[client]\nfoo=bar", + basename="import_config", + ) # Remove read permissions os.chmod(import_filename, os.stat(import_filename).st_mode - 0o444) error = self.assertRaises( - ImportOptionError, self.get_config, ["--import", import_filename]) - expected_message = ("Couldn't read configuration from %s." % - import_filename) + ImportOptionError, + self.get_config, + ["--import", import_filename], + ) + expected_message = ( + f"Couldn't read configuration from {import_filename}." + ) self.assertEqual(str(error), expected_message) - @mock.patch("landscape.client.configuration.SysVConfig") - def test_import_from_file_preserves_old_options(self, mock_sysvconfig): + @mock.patch("landscape.client.configuration.ServiceConfig") + def test_import_from_file_preserves_old_options(self, mock_serviceconfig): old_configuration = ( "[client]\n" "computer_title = Old Title\n" @@ -1547,40 +1705,58 @@ "registration_key = Old Password\n" "http_proxy = http://old.proxy\n" "https_proxy = https://old.proxy\n" - "url = http://old.url\n") + "url = http://old.url\n" + ) new_configuration = ( "[client]\n" "account_name = New Name\n" "registration_key = New Password\n" - "url = http://new.url\n") + "url = http://new.url\n" + ) - config_filename = self.makeFile(old_configuration, - basename="final_config") - import_filename = self.makeFile(new_configuration, - basename="import_config") + config_filename = self.makeFile( + old_configuration, + basename="final_config", + ) + import_filename = self.makeFile( + new_configuration, + basename="import_config", + ) # Use a command line option as well to test the precedence. 
- config = self.get_config(["--config", config_filename, "--silent", - "--import", import_filename, - "-p", "Command Line Password"]) + config = self.get_config( + [ + "--config", + config_filename, + "--silent", + "--import", + import_filename, + "-p", + "Command Line Password", + ], + ) setup(config) - mock_sysvconfig().set_start_on_boot.assert_called_once_with(True) - mock_sysvconfig().restart_landscape.assert_called_once_with() + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) + mock_serviceconfig.restart_landscape.assert_called_once_with() options = ConfigParser() options.read(config_filename) - self.assertEqual(dict(options.items("client")), - {"computer_title": "Old Title", - "account_name": "New Name", - "registration_key": "Command Line Password", - "http_proxy": "http://old.proxy", - "https_proxy": "https://old.proxy", - "url": "http://new.url"}) + self.assertEqual( + dict(options.items("client")), + { + "computer_title": "Old Title", + "account_name": "New Name", + "registration_key": "Command Line Password", + "http_proxy": "http://old.proxy", + "https_proxy": "https://old.proxy", + "url": "http://new.url", + }, + ) - @mock.patch("landscape.client.configuration.SysVConfig") - def test_import_from_file_may_reset_old_options(self, mock_sysvconfig): + @mock.patch("landscape.client.configuration.ServiceConfig") + def test_import_from_file_may_reset_old_options(self, mock_serviceconfig): """ This test ensures that setting an empty option in an imported configuration file will actually set the local value to empty @@ -1591,38 +1767,56 @@ "computer_title = Old Title\n" "account_name = Old Name\n" "registration_key = Old Password\n" - "url = http://old.url\n") + "url = http://old.url\n" + ) - new_configuration = ( - "[client]\n" - "registration_key =\n") + new_configuration = "[client]\n" "registration_key =\n" - config_filename = self.makeFile(old_configuration, - basename="final_config") - import_filename = self.makeFile(new_configuration, - basename="import_config") + config_filename = self.makeFile( + old_configuration, + basename="final_config", + ) + import_filename = self.makeFile( + new_configuration, + basename="import_config", + ) - config = self.get_config(["--config", config_filename, "--silent", - "--import", import_filename]) + config = self.get_config( + [ + "--config", + config_filename, + "--silent", + "--import", + import_filename, + ], + ) setup(config) - mock_sysvconfig().set_start_on_boot.assert_called_once_with(True) - mock_sysvconfig().restart_landscape.assert_called_once_with() + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) + mock_serviceconfig.restart_landscape.assert_called_once_with() options = ConfigParser() options.read(config_filename) - self.assertEqual(dict(options.items("client")), - {"computer_title": "Old Title", - "account_name": "Old Name", - "registration_key": "", # <== - "url": "http://old.url"}) + self.assertEqual( + dict(options.items("client")), + { + "computer_title": "Old Title", + "account_name": "Old Name", + "registration_key": "", # <== + "url": "http://old.url", + }, + ) @mock.patch("landscape.client.configuration.print_text") @mock.patch("landscape.client.configuration.fetch") - @mock.patch("landscape.client.configuration.SysVConfig") + @mock.patch("landscape.client.configuration.ServiceConfig") def test_import_from_url( - self, mock_sysvconfig, mock_fetch, mock_print_text): - mock_sysvconfig().restart_landscape.return_value = True + self, + mock_serviceconfig, + mock_fetch, + mock_print_text, + 
): + mock_serviceconfig.restart_landscape.return_value = True configuration = ( b"[client]\n" b"computer_title = New Title\n" @@ -1630,189 +1824,300 @@ b"registration_key = New Password\n" b"http_proxy = http://new.proxy\n" b"https_proxy = https://new.proxy\n" - b"url = http://new.url\n") + b"url = http://new.url\n" + ) mock_fetch.return_value = configuration config_filename = self.makeFile("", basename="final_config") - config = self.get_config(["--config", config_filename, "--silent", - "--import", "https://config.url"]) + config = self.get_config( + [ + "--config", + config_filename, + "--silent", + "--import", + "https://config.url", + ], + ) setup(config) mock_fetch.assert_called_once_with("https://config.url") mock_print_text.assert_called_once_with( - "Fetching configuration from https://config.url...") - mock_sysvconfig().set_start_on_boot.assert_called_once_with(True) - mock_sysvconfig().restart_landscape.assert_called_once_with() + "Fetching configuration from https://config.url...", + ) + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) + mock_serviceconfig.restart_landscape.assert_called_once_with() options = ConfigParser() options.read(config_filename) - self.assertEqual(dict(options.items("client")), - {"computer_title": "New Title", - "account_name": "New Name", - "registration_key": "New Password", - "http_proxy": "http://new.proxy", - "https_proxy": "https://new.proxy", - "url": "http://new.url"}) + self.assertEqual( + dict(options.items("client")), + { + "computer_title": "New Title", + "account_name": "New Name", + "registration_key": "New Password", + "http_proxy": "http://new.proxy", + "https_proxy": "https://new.proxy", + "url": "http://new.url", + }, + ) @mock.patch("landscape.client.configuration.print_text") @mock.patch("landscape.client.configuration.fetch") def test_import_from_url_with_http_code_fetch_error( - self, mock_fetch, mock_print_text): + self, + mock_fetch, + mock_print_text, + ): mock_fetch.side_effect = HTTPCodeError(501, "") config_filename = self.makeFile("", basename="final_config") try: - self.get_config(["--config", config_filename, "--silent", - "--import", "https://config.url"]) + self.get_config( + [ + "--config", + config_filename, + "--silent", + "--import", + "https://config.url", + ], + ) except ImportOptionError as error: - self.assertEqual(str(error), - "Couldn't download configuration from " - "https://config.url: Server " - "returned HTTP code 501") + self.assertEqual( + str(error), + "Couldn't download configuration from " + "https://config.url: Server " + "returned HTTP code 501", + ) else: self.fail("ImportOptionError not raised") mock_fetch.assert_called_once_with("https://config.url") mock_print_text.assert_called_once_with( - "Fetching configuration from https://config.url...") + "Fetching configuration from https://config.url...", + ) @mock.patch("landscape.client.configuration.print_text") @mock.patch("landscape.client.configuration.fetch") def test_import_from_url_with_pycurl_error( - self, mock_fetch, mock_print_text): + self, + mock_fetch, + mock_print_text, + ): mock_fetch.side_effect = PyCurlError(60, "pycurl message") config_filename = self.makeFile("", basename="final_config") try: - self.get_config(["--config", config_filename, "--silent", - "--import", "https://config.url"]) + self.get_config( + [ + "--config", + config_filename, + "--silent", + "--import", + "https://config.url", + ], + ) except ImportOptionError as error: - self.assertEqual(str(error), - "Couldn't download configuration from " - 
"https://config.url: Error 60: pycurl message") + self.assertEqual( + str(error), + "Couldn't download configuration from " + "https://config.url: Error 60: pycurl message", + ) else: self.fail("ImportOptionError not raised") mock_fetch.assert_called_once_with("https://config.url") mock_print_text.assert_called_once_with( - "Fetching configuration from https://config.url...") + "Fetching configuration from https://config.url...", + ) @mock.patch("landscape.client.configuration.print_text") @mock.patch("landscape.client.configuration.fetch", return_value=b"") def test_import_from_url_with_empty_content( - self, mock_fetch, mock_print_text): + self, + mock_fetch, + mock_print_text, + ): # Use a command line option as well to test the precedence. try: self.get_config(["--silent", "--import", "https://config.url"]) except ImportOptionError as error: - self.assertEqual(str(error), - "Nothing to import at https://config.url.") + self.assertEqual( + str(error), + "Nothing to import at https://config.url.", + ) else: self.fail("ImportOptionError not raised") mock_fetch.assert_called_once_with("https://config.url") mock_print_text.assert_called_once_with( - "Fetching configuration from https://config.url...") + "Fetching configuration from https://config.url...", + ) @mock.patch("landscape.client.configuration.print_text") - @mock.patch("landscape.client.configuration.fetch", - return_value=b"BOGUS!") + @mock.patch( + "landscape.client.configuration.fetch", + return_value=b"BOGUS!", + ) def test_import_from_url_with_bogus_content( - self, mock_fetch, mock_print_text): + self, + mock_fetch, + mock_print_text, + ): # Use a command line option as well to test the precedence. try: self.get_config(["--silent", "--import", "https://config.url"]) except ImportOptionError as error: - self.assertEqual("Nothing to import at https://config.url.", - str(error)) + self.assertEqual( + "Nothing to import at https://config.url.", + str(error), + ) else: self.fail("ImportOptionError not raised") mock_fetch.assert_called_once_with("https://config.url") mock_print_text.assert_called_once_with( - "Fetching configuration from https://config.url...") + "Fetching configuration from https://config.url...", + ) @mock.patch("landscape.client.configuration.print_text") - @mock.patch("landscape.client.configuration.fetch", - side_effect=HTTPCodeError(404, "")) + @mock.patch( + "landscape.client.configuration.fetch", + side_effect=HTTPCodeError(404, ""), + ) def test_import_error_is_handled_nicely_by_main( - self, mock_fetch, mock_print_text): + self, + mock_fetch, + mock_print_text, + ): system_exit = self.assertRaises( - SystemExit, main, ["--import", "https://config.url"]) + SystemExit, + main, + ["--import", "https://config.url"], + ) self.assertEqual(system_exit.code, 1) mock_fetch.assert_called_once_with("https://config.url") mock_print_text.assert_any_call( - "Fetching configuration from https://config.url...") + "Fetching configuration from https://config.url...", + ) mock_print_text.assert_called_with( "Couldn't download configuration from https://config.url: " - "Server returned HTTP code 404", error=True) + "Server returned HTTP code 404", + error=True, + ) @mock.patch("landscape.client.configuration.print_text") - @mock.patch("landscape.client.configuration.SysVConfig") + @mock.patch("landscape.client.configuration.ServiceConfig") def test_base64_ssl_public_key_is_exported_to_file( - self, mock_sysvconfig, mock_print_text): - mock_sysvconfig().restart_landscape.return_value = True + self, + mock_serviceconfig, + 
mock_print_text, + ): + mock_serviceconfig.restart_landscape.return_value = True data_path = self.makeDir() - config_filename = self.makeFile("[client]\ndata_path=%s" % data_path) + config_filename = self.makeFile(f"[client]\ndata_path={data_path}") key_filename = os.path.join( data_path, - os.path.basename(config_filename) + ".ssl_public_key") + os.path.basename(config_filename) + ".ssl_public_key", + ) - config = self.get_config(["--silent", "-c", config_filename, - "-u", "url", "-a", "account", "-t", "title", - "--ssl-public-key", "base64:SGkgdGhlcmUh"]) + config = self.get_config( + [ + "--silent", + "-c", + config_filename, + "-u", + "url", + "-a", + "account", + "-t", + "title", + "--ssl-public-key", + "base64:SGkgdGhlcmUh", + ], + ) config.data_path = data_path setup(config) - mock_sysvconfig().set_start_on_boot.assert_called_once_with(True) - mock_sysvconfig().restart_landscape.assert_called_once_with() + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) + mock_serviceconfig.restart_landscape.assert_called_once_with() mock_print_text.assert_called_once_with( - "Writing SSL CA certificate to %s..." % key_filename) - self.assertEqual("Hi there!", open(key_filename, "r").read()) + f"Writing SSL CA certificate to {key_filename}...", + ) + self.assertEqual("Hi there!", open(key_filename).read()) options = ConfigParser() options.read(config_filename) - self.assertEqual(options.get("client", "ssl_public_key"), - key_filename) + self.assertEqual(options.get("client", "ssl_public_key"), key_filename) - @mock.patch("landscape.client.configuration.SysVConfig") + @mock.patch("landscape.client.configuration.ServiceConfig") def test_normal_ssl_public_key_is_not_exported_to_file( - self, mock_sysvconfig): - mock_sysvconfig().restart_landscape.return_value = True + self, + mock_serviceconfig, + ): + mock_serviceconfig.restart_landscape.return_value = True config_filename = self.makeFile("") - config = self.get_config(["--silent", "-c", config_filename, - "-u", "url", "-a", "account", "-t", "title", - "--ssl-public-key", "/some/filename"]) + config = self.get_config( + [ + "--silent", + "-c", + config_filename, + "-u", + "url", + "-a", + "account", + "-t", + "title", + "--ssl-public-key", + "/some/filename", + ], + ) setup(config) - mock_sysvconfig().set_start_on_boot.assert_called_once_with(True) - mock_sysvconfig().restart_landscape.assert_called_once_with() + mock_serviceconfig.set_start_on_boot.assert_called_once_with(True) + mock_serviceconfig.restart_landscape.assert_called_once_with() key_filename = config_filename + ".ssl_public_key" self.assertFalse(os.path.isfile(key_filename)) options = ConfigParser() options.read(config_filename) - self.assertEqual(options.get("client", "ssl_public_key"), - "/some/filename") + self.assertEqual( + options.get("client", "ssl_public_key"), + "/some/filename", + ) # We test them individually since they must work individually. 
@mock.patch("landscape.client.configuration.print_text") @mock.patch("landscape.client.configuration.fetch") def test_import_from_url_honors_http_proxy( - self, mock_fetch, mock_print_text): + self, + mock_fetch, + mock_print_text, + ): self.ensure_import_from_url_honors_proxy_options( - "http_proxy", mock_fetch, mock_print_text) + "http_proxy", + mock_fetch, + mock_print_text, + ) @mock.patch("landscape.client.configuration.print_text") @mock.patch("landscape.client.configuration.fetch") def test_import_from_url_honors_https_proxy( - self, mock_fetch, mock_print_text): + self, + mock_fetch, + mock_print_text, + ): self.ensure_import_from_url_honors_proxy_options( - "https_proxy", mock_fetch, mock_print_text) + "https_proxy", + mock_fetch, + mock_print_text, + ) def ensure_import_from_url_honors_proxy_options( - self, proxy_option, mock_fetch, mock_print_text): - + self, + proxy_option, + mock_fetch, + mock_print_text, + ): def check_proxy(url): self.assertEqual("https://config.url", url) self.assertEqual(os.environ.get(proxy_option), "http://proxy") @@ -1824,20 +2129,27 @@ config_filename = self.makeFile("", basename="final_config") try: - self.get_config(["--config", config_filename, "--silent", - "--" + proxy_option.replace("_", "-"), - "http://proxy", - "--import", "https://config.url"]) + self.get_config( + [ + "--config", + config_filename, + "--silent", + "--" + proxy_option.replace("_", "-"), + "http://proxy", + "--import", + "https://config.url", + ], + ) except ImportOptionError: # The returned content is empty. We don't really care for # this test. pass mock_print_text.assert_called_once_with( - "Fetching configuration from https://config.url...") + "Fetching configuration from https://config.url...", + ) -class FakeConnectorFactory(object): - +class FakeConnectorFactory: def __init__(self, remote): self.remote = remote @@ -1858,7 +2170,7 @@ helpers = [FakeBrokerServiceHelper] def setUp(self): - super(RegisterRealFunctionTest, self).setUp() + super().setUp() self.config = LandscapeSetupConfiguration() self.config.load(["-c", self.config_filename]) @@ -1866,7 +2178,11 @@ self.reactor.call_later(0, self.reactor.fire, "registration-done") connector_factory = FakeConnectorFactory(self.remote) result = register( - self.config, self.reactor, connector_factory, max_retries=99) + self.config, + self.reactor, + connector_factory, + max_retries=99, + ) self.assertEqual("success", result) def test_register_registration_error(self): @@ -1883,24 +2199,27 @@ connector_factory = FakeConnectorFactory(self.remote) result = register( - config=self.config, reactor=self.reactor, - connector_factory=connector_factory, max_retries=99) + config=self.config, + reactor=self.reactor, + connector_factory=connector_factory, + max_retries=99, + ) self.assertEqual("max-pending-computers", result) -class FauxConnection(object): +class FauxConnection: def __init__(self): self.callbacks = [] self.errbacks = [] - def addCallback(self, func, *args, **kws): + def addCallback(self, func, *args, **kws): # noqa: N802 self.callbacks.append(func) - def addErrback(self, func, *args, **kws): + def addErrback(self, func, *args, **kws): # noqa: N802 self.errbacks.append(func) -class FauxConnector(object): +class FauxConnector: was_disconnected = False @@ -1922,18 +2241,18 @@ helpers = [RemoteBrokerHelper] def setUp(self): - super(RegisterFunctionTest, self).setUp() + super().setUp() self.config = LandscapeSetupConfiguration() self.config.load(["-c", self.config_filename]) def test_register(self): """Is the async machinery 
wired up properly?""" - class FauxFailure(object): - def getTraceback(self): - return 'traceback' + class FauxFailure: + def getTraceback(self): # noqa: N802 + return "traceback" - class FauxReactor(object): + class FauxReactor: def run(self): self.was_run = True @@ -1947,20 +2266,27 @@ return connector # We pre-seed a success because no actual result will be generated. - register(self.config, reactor, connector_factory, max_retries=99, - results=['success']) + register( + self.config, + reactor, + connector_factory, + max_retries=99, + results=["success"], + ) self.assertTrue(reactor.was_run) # Only a single callback is registered, it does the real work when a # connection is established. self.assertTrue(1, len(connector.connection.callbacks)) self.assertEqual( - 'got_connection', - connector.connection.callbacks[0].func.__name__) + "got_connection", + connector.connection.callbacks[0].func.__name__, + ) # Should something go wrong, there is an error handler registered. self.assertTrue(1, len(connector.connection.errbacks)) self.assertEqual( - 'got_error', - connector.connection.errbacks[0].func.__name__) + "got_error", + connector.connection.errbacks[0].func.__name__, + ) # We ask for retries because networks aren't reliable. self.assertEqual(99, connector.max_retries) @@ -1976,8 +2302,11 @@ return FauxConnector(reactor, self.config) # We pre-seed a success because no actual result will be generated. - register(self.config, connector_factory=connector_factory, - results=["success"]) + register( + self.config, + connector_factory=connector_factory, + results=["success"], + ) mock_reactor.assert_called_once_with() mock_reactor().run.assert_called_once_with() @@ -1987,7 +2316,7 @@ def faux_got_connection(add_result, remote, connector, reactor): pass - class FauxRemote(object): + class FauxRemote: handlers = None deferred = None @@ -2002,38 +2331,38 @@ self.register_deferred = FauxRegisterDeferred() return self.register_deferred - class FauxCallOnEventDeferred(object): + class FauxCallOnEventDeferred: def __init__(self): self.callbacks = [] self.errbacks = [] - def addCallbacks(self, *funcs, **kws): + def addCallbacks(self, *funcs, **kws): # noqa: N802 self.callbacks.extend(funcs) - class FauxRegisterDeferred(object): + class FauxRegisterDeferred: def __init__(self): self.callbacks = [] self.errbacks = [] - def addCallback(self, func): + def addCallback(self, func): # noqa: N802 assert func.__name__ == "got_connection", "Wrong callback." self.callbacks.append(faux_got_connection) self.gather_results_deferred = GatherResultsDeferred() return self.gather_results_deferred - def addCallbacks(self, *funcs, **kws): + def addCallbacks(self, *funcs, **kws): # noqa: N802 self.callbacks.extend(funcs) - def addErrback(self, func, *args, **kws): + def addErrback(self, func, *args, **kws): # noqa: N802 self.errbacks.append(func) return self - class GatherResultsDeferred(object): + class GatherResultsDeferred: def __init__(self): self.callbacks = [] self.errbacks = [] - def addCallbacks(self, *funcs, **kws): + def addCallbacks(self, *funcs, **kws): # noqa: N802 self.callbacks.extend(funcs) faux_connector = FauxConnector(self.reactor, self.config) @@ -2041,24 +2370,33 @@ status_results = [] faux_remote = FauxRemote() results = got_connection( - status_results.append, faux_connector, self.reactor, faux_remote) + status_results.append, + faux_connector, + self.reactor, + faux_remote, + ) # We set up two deferreds, one for the RPC call and one for event # handlers. 
self.assertEqual(2, len(results.resultList)) # Handlers are registered for the events we are interested in. self.assertCountEqual( - ['registration-failed', 'exchange-failed', 'registration-done'], - faux_remote.handlers.keys()) + ["registration-failed", "exchange-failed", "registration-done"], + faux_remote.handlers.keys(), + ) self.assertCountEqual( - ['failure', 'exchange_failure', 'success'], - [handler.func.__name__ - for handler in faux_remote.handlers.values()]) + ["failure", "exchange_failure", "success"], + [ + handler.func.__name__ + for handler in faux_remote.handlers.values() + ], + ) # We include a single error handler to react to exchange errors. self.assertTrue(1, len(faux_remote.register_deferred.errbacks)) # the handle_registration_errors is wrapped in a partial() self.assertEqual( - 'handle_registration_errors', - faux_remote.register_deferred.errbacks[0].func.__name__) + "handle_registration_errors", + faux_remote.register_deferred.errbacks[0].func.__name__, + ) def test_register_with_on_error_and_an_error(self): """A caller-provided on_error callable will be called if errors occur. @@ -2066,6 +2404,7 @@ The on_error parameter is provided for the client charm which calls register() directly and provides on_error as a keyword argument. """ + def faux_got_connection(add_result, remote, connector, reactor): add_result("something bad") @@ -2077,13 +2416,17 @@ on_error_was_called.append(True) self.reactor.call_later(1, self.reactor.stop) - register(self.config, reactor=self.reactor, on_error=on_error, - got_connection=faux_got_connection) + register( + self.config, + reactor=self.reactor, + on_error=on_error, + got_connection=faux_got_connection, + ) self.assertTrue(on_error_was_called) def test_register_with_on_error_and_no_error(self): - """A caller-provided on_error callable will not be called if no error. 
- """ + """Caller-provided on_error callable will not be called if no error.""" + def faux_got_connection(add_result, remote, connector, reactor): add_result("success") @@ -2093,23 +2436,32 @@ on_error_was_called.append(True) self.reactor.call_later(1, self.reactor.stop) - register(self.config, reactor=self.reactor, on_error=on_error, - got_connection=faux_got_connection) + register( + self.config, + reactor=self.reactor, + on_error=on_error, + got_connection=faux_got_connection, + ) self.assertFalse(on_error_was_called) def test_register_happy_path(self): """A successful result provokes no exceptions.""" + def faux_got_connection(add_result, remote, connector, reactor): - add_result('success') + add_result("success") + self.reactor.call_later(1, self.reactor.stop) self.assertEqual( "success", - register(self.config, reactor=self.reactor, - got_connection=faux_got_connection)) + register( + self.config, + reactor=self.reactor, + got_connection=faux_got_connection, + ), + ) class SSLCertificateDataTest(LandscapeConfigurationTest): - @mock.patch("landscape.client.configuration.print_text") def test_store_public_key_data(self, mock_print_text): """ @@ -2121,17 +2473,20 @@ os.mkdir(config.data_path) key_filename = os.path.join( config.data_path, - os.path.basename(config.get_config_filename()) + ".ssl_public_key") + os.path.basename(config.get_config_filename()) + ".ssl_public_key", + ) - self.assertEqual(key_filename, - store_public_key_data(config, b"123456789")) + self.assertEqual( + key_filename, + store_public_key_data(config, b"123456789"), + ) self.assertEqual(b"123456789", read_binary_file(key_filename)) mock_print_text.assert_called_once_with( - "Writing SSL CA certificate to %s..." % key_filename) + f"Writing SSL CA certificate to {key_filename}...", + ) class ReportRegistrationOutcomeTest(unittest.TestCase): - def setUp(self): self.result = [] self.output = [] @@ -2142,7 +2497,7 @@ def test_success_case(self): report_registration_outcome("success", print=self.record_result) - self.assertIn("System successfully registered.", self.result) + self.assertIn("Registration request sent successfully.", self.result) self.assertIn(sys.stdout.name, self.output) def test_unknown_account_case(self): @@ -2151,7 +2506,9 @@ returned. """ report_registration_outcome( - "unknown-account", print=self.record_result) + "unknown-account", + print=self.record_result, + ) self.assertIn("Invalid account name or registration key.", self.result) self.assertIn(sys.stderr.name, self.output) @@ -2161,38 +2518,46 @@ returned. """ report_registration_outcome( - "max-pending-computers", print=self.record_result) + "max-pending-computers", + print=self.record_result, + ) messages = ( "Maximum number of computers pending approval reached. ", "Login to your Landscape server account page to manage pending " - "computer approvals.") + "computer approvals.", + ) self.assertIn(messages, self.result) self.assertIn(sys.stderr.name, self.output) def test_ssl_error_case(self): report_registration_outcome("ssl-error", print=self.record_result) self.assertIn( - ("\nThe server's SSL information is incorrect, or fails " - "signature verification!\n" - "If the server is using a self-signed certificate, " - "please ensure you supply it with the --ssl-public-key " - "parameter."), - self.result) + ( + "\nThe server's SSL information is incorrect, or fails " + "signature verification!\n" + "If the server is using a self-signed certificate, " + "please ensure you supply it with the --ssl-public-key " + "parameter." 
+ ), + self.result, + ) self.assertIn(sys.stderr.name, self.output) def test_non_ssl_error_case(self): report_registration_outcome("non-ssl-error", print=self.record_result) self.assertIn( - ("\nWe were unable to contact the server.\n" - "Your internet connection may be down. " - "The landscape client will continue to try and contact " - "the server periodically."), - self.result) + ( + "\nWe were unable to contact the server.\n" + "Your internet connection may be down. " + "The landscape client will continue to try and contact " + "the server periodically." + ), + self.result, + ) self.assertIn(sys.stderr.name, self.output) class DetermineExitCodeTest(unittest.TestCase): - def test_success_means_exit_code_0(self): """ When passed "success" the determine_exit_code function returns 0. @@ -2206,8 +2571,11 @@ 2. """ failure_codes = [ - "unknown-account", "max-computers-count", "ssl-error", - "non-ssl-error"] + "unknown-account", + "max-computers-count", + "ssl-error", + "non-ssl-error", + ] for code in failure_codes: result = determine_exit_code(code) self.assertEqual(2, result) @@ -2218,7 +2586,7 @@ helpers = [BrokerConfigurationHelper] def setUp(self): - super(IsRegisteredTest, self).setUp() + super().setUp() persist_file = os.path.join(self.config.data_path, "broker.bpickle") self.persist = Persist(filename=persist_file) @@ -2242,48 +2610,53 @@ helpers = [BrokerConfigurationHelper] def setUp(self): - super(RegistrationInfoTest, self).setUp() - self.custom_args = ['hello.py'] # Fake python script name - self.account_name = 'world' + super().setUp() + self.custom_args = ["hello.py"] # Fake python script name + self.account_name = "world" self.data_path = self.makeDir() - self.config_text = textwrap.dedent(""" + self.config_text = textwrap.dedent( + """ [client] computer_title = hello account_name = {} data_path = {} - """.format(self.account_name, self.data_path)) + """.format( + self.account_name, + self.data_path, + ), + ) def test_not_registered(self): - '''False when client is not registered''' + """False when client is not registered""" config_filename = self.config.default_config_filenames[0] self.makeFile(self.config_text, path=config_filename) self.config.load(self.custom_args) text = registration_info_text(self.config, False) - self.assertIn('False', text) + self.assertIn("False", text) self.assertNotIn(self.account_name, text) def test_registered(self): - ''' + """ When client is registered, then the text should display as True and account name should be present - ''' + """ config_filename = self.config.default_config_filenames[0] self.makeFile(self.config_text, path=config_filename) self.config.load(self.custom_args) text = registration_info_text(self.config, True) - self.assertIn('True', text) + self.assertIn("True", text) self.assertIn(self.account_name, text) def test_custom_config_path(self): - '''The custom config path should show up in the text''' + """The custom config path should show up in the text""" custom_path = self.makeFile(self.config_text) - self.custom_args += ['-c', custom_path] + self.custom_args += ["-c", custom_path] self.config.load(self.custom_args) text = registration_info_text(self.config, False) self.assertIn(custom_path, text) def test_data_path(self): - '''The config data path should show in the text''' + """The config data path should show in the text""" config_filename = self.config.default_config_filenames[0] self.makeFile(self.config_text, path=config_filename) self.config.load(self.custom_args) @@ -2291,16 +2664,22 @@ self.assertIn(self.data_path, 
text) def test_registered_exit_code(self): - '''Returns exit code zero when client is registered''' - Identity.secure_id = 'test' # Simulate successful registration + """Returns exit code zero when client is registered""" + Identity.secure_id = "test" # Simulate successful registration exception = self.assertRaises( - SystemExit, main, ["--is-registered", "--silent"], - print=noop_print) + SystemExit, + main, + ["--is-registered", "--silent"], + print=noop_print, + ) self.assertEqual(0, exception.code) def test_not_registered_exit_code(self): - '''Returns special return code when client is not registered''' + """Returns special return code when client is not registered""" exception = self.assertRaises( - SystemExit, main, ["--is-registered", "--silent"], - print=noop_print) + SystemExit, + main, + ["--is-registered", "--silent"], + print=noop_print, + ) self.assertEqual(EXIT_NOT_REGISTERED, exception.code) diff -Nru landscape-client-23.02/landscape/client/tests/test_deployment.py landscape-client-23.08/landscape/client/tests/test_deployment.py --- landscape-client-23.02/landscape/client/tests/test_deployment.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/test_deployment.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,12 @@ -import mock - -from landscape.lib.fs import read_text_file, create_text_file - -from landscape.client.deployment import ( - BaseConfiguration, Configuration, get_versioned_persist, - init_logging) +from unittest import mock +from landscape.client.deployment import BaseConfiguration +from landscape.client.deployment import Configuration +from landscape.client.deployment import get_versioned_persist +from landscape.client.deployment import init_logging from landscape.client.tests.helpers import LandscapeTest +from landscape.lib.fs import create_text_file +from landscape.lib.fs import read_text_file class BabbleConfiguration(BaseConfiguration): @@ -14,13 +14,12 @@ default_config_filenames = [] def make_parser(self): - parser = super(BabbleConfiguration, self).make_parser() + parser = super().make_parser() parser.add_option("--whatever", metavar="STUFF") return parser class LoggingTest(LandscapeTest): - def test_init_logging_file(self): """Check init_logging sets proper logging paths.""" @@ -33,13 +32,18 @@ init_logging(MyConfiguration(), "fooprog") mock_log.assert_called_once_with( - mock.ANY, 20, "/somepath", "fooprog", mock.ANY, None) + mock.ANY, + 20, + "/somepath", + "fooprog", + mock.ANY, + None, + ) class BaseConfigurationTest(LandscapeTest): - def setUp(self): - super(BaseConfigurationTest, self).setUp() + super().setUp() self.reset_config() def reset_config(self, configuration_class=None): @@ -47,6 +51,7 @@ class MyConfiguration(BaseConfiguration): default_config_filenames = [] + configuration_class = MyConfiguration self.config_class = configuration_class @@ -55,8 +60,10 @@ def write_config_file(self, **kwargs): section_name = kwargs.pop("section_name", "client") - config = "\n".join(["[%s]" % (section_name,)] + - ["%s = %s" % pair for pair in kwargs.items()]) + config = "\n".join( + [f"[{section_name}]"] + + [f"{key} = {value}" for key, value in kwargs.items()], + ) self.config_filename = self.makeFile(config) self.config.default_config_filenames[:] = [self.config_filename] @@ -133,9 +140,8 @@ class ConfigurationTest(LandscapeTest): - def setUp(self): - super(ConfigurationTest, self).setUp() + super().setUp() class MyConfiguration(Configuration): default_config_filenames = [] @@ -148,7 +154,8 @@ def 
test_log_file_option(self): """Ensure options.log_dir option can be read by parse_args.""" options = self.parser.parse_args( - ["--log-dir", "/var/log/my-awesome-log"])[0] + ["--log-dir", "/var/log/my-awesome-log"], + )[0] self.assertEqual(options.log_dir, "/var/log/my-awesome-log") def test_log_level_default(self): @@ -176,7 +183,8 @@ def test_url_option(self): """Ensure options.url option can be read by parse_args.""" options = self.parser.parse_args( - ["--url", "http://mylandscape/message-system"])[0] + ["--url", "http://mylandscape/message-system"], + )[0] self.assertEqual(options.url, "http://mylandscape/message-system") def test_url_default(self): @@ -187,19 +195,23 @@ def test_ping_url_option(self): """Ensure options.ping_url option can be read by parse_args.""" options = self.parser.parse_args( - ["--ping-url", "http://mylandscape/ping"])[0] + ["--ping-url", "http://mylandscape/ping"], + )[0] self.assertEqual(options.ping_url, "http://mylandscape/ping") def test_ping_url_default(self): """Ensure parse_args sets appropriate ping_url default.""" options = self.parser.parse_args([])[0] self.assertEqual( - options.ping_url, "http://landscape.canonical.com/ping") + options.ping_url, + "http://landscape.canonical.com/ping", + ) def test_ssl_public_key_option(self): """Ensure options.ssl_public_key option can be read by parse_args.""" options = self.parser.parse_args( - ["--ssl-public-key", "/tmp/somekeyfile.ssl"])[0] + ["--ssl-public-key", "/tmp/somekeyfile.ssl"], + )[0] self.assertEqual(options.ssl_public_key, "/tmp/somekeyfile.ssl") def test_ssl_public_key_default(self): @@ -238,7 +250,8 @@ """ self.assertEqual( "/var/lib/landscape/client/sockets", - self.config.sockets_path) + self.config.sockets_path, + ) def test_annotations_path(self): """ @@ -247,7 +260,8 @@ """ self.assertEqual( "/var/lib/landscape/client/annotations.d", - self.config.annotations_path) + self.config.annotations_path, + ) def test_juju_filename(self): """ @@ -256,19 +270,20 @@ """ self.assertEqual( "/var/lib/landscape/client/juju-info.json", - self.config.juju_filename) + self.config.juju_filename, + ) class GetVersionedPersistTest(LandscapeTest): - def test_upgrade_service(self): - - class FakeService(object): + class FakeService: persist_filename = self.makePersistFile(content="") service_name = "monitor" mock_monitor = mock.Mock() - with mock.patch.dict("landscape.client.upgraders.UPGRADE_MANAGERS", - {"monitor": mock_monitor}): + with mock.patch.dict( + "landscape.client.upgraders.UPGRADE_MANAGERS", + {"monitor": mock_monitor}, + ): persist = get_versioned_persist(FakeService()) mock_monitor.apply.assert_called_with(persist) diff -Nru landscape-client-23.02/landscape/client/tests/test_diff.py landscape-client-23.08/landscape/client/tests/test_diff.py --- landscape-client-23.02/landscape/client/tests/test_diff.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/test_diff.py 2023-08-17 21:19:32.000000000 +0000 @@ -3,7 +3,6 @@ class DiffTest(LandscapeTest): - def test_empty(self): self.assertEqual(diff({}, {}), ({}, {}, {})) @@ -29,6 +28,7 @@ def test_complex(self): old = {"str": "wubble", "int": 10} new = {"strlist": ["foo", "bar"], "int": 25} - self.assertEqual(diff(old, new), ({"strlist": ["foo", "bar"]}, - {"int": 25}, - {"str": "wubble"})) + self.assertEqual( + diff(old, new), + ({"strlist": ["foo", "bar"]}, {"int": 25}, {"str": "wubble"}), + ) diff -Nru landscape-client-23.02/landscape/client/tests/test_lockfile.py 
landscape-client-23.08/landscape/client/tests/test_lockfile.py --- landscape-client-23.02/landscape/client/tests/test_lockfile.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/test_lockfile.py 2023-08-17 21:19:32.000000000 +0000 @@ -7,13 +7,17 @@ class LockFileTest(LandscapeTest): - def test_read_process_name(self): - app = self.makeFile(textwrap.dedent("""\ + app = self.makeFile( + textwrap.dedent( + """\ #!/usr/bin/python3 import time time.sleep(10) - """), basename="my_fancy_app") + """, + ), + basename="my_fancy_app", + ) os.chmod(app, 0o755) call = subprocess.Popen([app]) self.addCleanup(call.terminate) diff -Nru landscape-client-23.02/landscape/client/tests/test_patch.py landscape-client-23.08/landscape/client/tests/test_patch.py --- landscape-client-23.02/landscape/client/tests/test_patch.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/test_patch.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,12 @@ import sqlite3 -from landscape.lib.persist import Persist -from landscape.client.patch import UpgradeManager, SQLiteUpgradeManager +from landscape.client.patch import SQLiteUpgradeManager +from landscape.client.patch import UpgradeManager from landscape.client.tests.helpers import LandscapeTest +from landscape.lib.persist import Persist class PatchTest(LandscapeTest): - def setUp(self): LandscapeTest.setUp(self) self.persist = Persist() @@ -87,7 +87,6 @@ class SQLitePatchTest(LandscapeTest): - def setUp(self): LandscapeTest.setUp(self) self.db_filename = self.makeFile() @@ -103,8 +102,10 @@ """ self.manager.initialize(self.cursor) self.manager.apply(self.cursor) - self.assertEqual(self.manager.get_database_versions(self.cursor), - set()) + self.assertEqual( + self.manager.get_database_versions(self.cursor), + set(), + ) def test_one_patch(self): """Test that patches are called and passed a sqlite db object.""" @@ -152,8 +153,10 @@ self.manager.register_upgrader(5, lambda x: 1 / 0) self.manager.register_upgrader(3, lambda x: 1 / 0) self.manager.initialize(self.cursor) - self.assertEqual(self.manager.get_database_versions(self.cursor), - set([1, 3, 5])) + self.assertEqual( + self.manager.get_database_versions(self.cursor), + {1, 3, 5}, + ) def test_decorated_upgraders_run(self): """ diff -Nru landscape-client-23.02/landscape/client/tests/test_reactor.py landscape-client-23.08/landscape/client/tests/test_reactor.py --- landscape-client-23.02/landscape/client/tests/test_reactor.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/test_reactor.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,11 @@ import time from landscape.client.reactor import LandscapeReactor -from landscape.lib.tests.test_reactor import ReactorTestMixin from landscape.client.tests.helpers import LandscapeTest +from landscape.lib.tests.test_reactor import ReactorTestMixin class LandscapeReactorTest(LandscapeTest, ReactorTestMixin): - def get_reactor(self): reactor = LandscapeReactor() # It's not possible to stop the reactor in a Trial test, calling diff -Nru landscape-client-23.02/landscape/client/tests/test_serviceconfig.py landscape-client-23.08/landscape/client/tests/test_serviceconfig.py --- landscape-client-23.02/landscape/client/tests/test_serviceconfig.py 1970-01-01 00:00:00.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/test_serviceconfig.py 2023-08-17 21:19:32.000000000 +0000 @@ -0,0 +1,223 @@ +import importlib +import os +import subprocess +from unittest import mock +from 
unittest import TestCase + +import landscape.client.serviceconfig as serviceconfig +from landscape.client.serviceconfig import SNAPCTL +from landscape.client.serviceconfig import SnapdConfig +from landscape.client.serviceconfig import SYSTEMCTL +from landscape.client.serviceconfig import SystemdConfig + + +class SystemdConfigTestCase(TestCase): + def setUp(self): + super().setUp() + + run_patch = mock.patch("subprocess.run") + self.run_mock = run_patch.start() + + self.addCleanup(mock.patch.stopall) + + def test_set_to_run_on_boot(self): + SystemdConfig.set_start_on_boot(True) + self.run_mock.assert_called_once_with( + [SYSTEMCTL, "enable", "landscape-client.service", "--quiet"], + stdout=subprocess.PIPE, + ) + + def test_set_to_not_run_on_boot(self): + SystemdConfig.set_start_on_boot(False) + self.run_mock.assert_called_once_with( + [SYSTEMCTL, "disable", "landscape-client.service", "--quiet"], + stdout=subprocess.PIPE, + ) + + def test_configured_to_run(self): + self.run_mock.return_value = subprocess.CompletedProcess("", 0) + + self.assertTrue(SystemdConfig.is_configured_to_run()) + self.run_mock.assert_called_once_with( + [SYSTEMCTL, "is-enabled", "landscape-client.service", "--quiet"], + stdout=subprocess.PIPE, + ) + + def test_not_configured_to_run(self): + self.run_mock.return_value = subprocess.CompletedProcess("", 1) + + self.assertFalse(SystemdConfig.is_configured_to_run()) + self.run_mock.assert_called_once_with( + [SYSTEMCTL, "is-enabled", "landscape-client.service", "--quiet"], + stdout=subprocess.PIPE, + ) + + def test_run_landscape(self): + SystemdConfig.restart_landscape() + self.run_mock.assert_called_once_with( + [SYSTEMCTL, "restart", "landscape-client.service", "--quiet"], + check=True, + stdout=subprocess.PIPE, + ) + + def test_run_landscape_exception(self): + self.run_mock.side_effect = subprocess.CalledProcessError(1, "") + + self.assertRaises( + serviceconfig.ServiceConfigException, + SystemdConfig.restart_landscape, + ) + self.run_mock.assert_called_once_with( + [SYSTEMCTL, "restart", "landscape-client.service", "--quiet"], + check=True, + stdout=subprocess.PIPE, + ) + + def test_stop_landscape(self): + SystemdConfig.stop_landscape() + self.run_mock.assert_called_once_with( + [SYSTEMCTL, "stop", "landscape-client.service", "--quiet"], + check=True, + stdout=subprocess.PIPE, + ) + + def test_stop_landscape_exception(self): + self.run_mock.side_effect = subprocess.CalledProcessError(1, "") + + self.assertRaises( + serviceconfig.ServiceConfigException, + SystemdConfig.stop_landscape, + ) + self.run_mock.assert_called_once_with( + [SYSTEMCTL, "stop", "landscape-client.service", "--quiet"], + check=True, + stdout=subprocess.PIPE, + ) + + +class SnapdConfigTestCase(TestCase): + def setUp(self): + super().setUp() + + run_patch = mock.patch("subprocess.run") + self.run_mock = run_patch.start() + + self.addCleanup(mock.patch.stopall) + + def test_set_to_run_on_boot(self): + SnapdConfig.set_start_on_boot(True) + self.run_mock.assert_called_once_with( + [SNAPCTL, "start", "landscape-client", "--enable"], + stdout=subprocess.PIPE, + ) + + def test_set_to_not_run_on_boot(self): + SnapdConfig.set_start_on_boot(False) + self.run_mock.assert_called_once_with( + [SNAPCTL, "stop", "landscape-client", "--disable"], + stdout=subprocess.PIPE, + ) + + def test_configured_to_run(self): + self.run_mock.return_value = subprocess.CompletedProcess( + "", + 0, + stdout=( + "Service Startup Current Notes\n" + "landscape-client enabled active -\n" + ), + ) + + 
self.assertTrue(SnapdConfig.is_configured_to_run()) + self.run_mock.assert_called_once_with( + [SNAPCTL, "services", "landscape-client"], + stdout=subprocess.PIPE, + ) + + def test_not_configured_to_run(self): + self.run_mock.return_value = subprocess.CompletedProcess( + "", + 0, + stdout=( + "Service Startup Current Notes\n" + "landscape-client disabled inactive -\n" + ), + ) + + self.assertFalse(SnapdConfig.is_configured_to_run()) + self.run_mock.assert_called_once_with( + [SNAPCTL, "services", "landscape-client"], + stdout=subprocess.PIPE, + ) + + def test_run_landscape(self): + SnapdConfig.restart_landscape() + self.run_mock.assert_called_once_with( + [SNAPCTL, "restart", "landscape-client"], + check=True, + stdout=subprocess.PIPE, + ) + + def test_run_landscape_exception(self): + self.run_mock.side_effect = subprocess.CalledProcessError(1, "") + + self.assertRaises( + serviceconfig.ServiceConfigException, + SnapdConfig.restart_landscape, + ) + self.run_mock.assert_called_once_with( + [SNAPCTL, "restart", "landscape-client"], + check=True, + stdout=subprocess.PIPE, + ) + + def test_stop_landscape(self): + SnapdConfig.stop_landscape() + self.run_mock.assert_called_once_with( + [SNAPCTL, "stop", "landscape-client"], + check=True, + stdout=subprocess.PIPE, + ) + + def test_stop_landscape_exception(self): + self.run_mock.side_effect = subprocess.CalledProcessError(1, "") + + self.assertRaises( + serviceconfig.ServiceConfigException, + SnapdConfig.stop_landscape, + ) + self.run_mock.assert_called_once_with( + [SNAPCTL, "stop", "landscape-client"], + check=True, + stdout=subprocess.PIPE, + ) + + +class ServiceConfigImportTestCase(TestCase): + """Tests validating we import the appropriate ServiceConfig implementation + based on configuration. + + N.B.: `importlib.reload` re-binds imports from the module, so this can mess + with other tests if they rely on identity comparisons, such as in + exception-catching. 
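The caveat in that docstring is easy to trip over: `importlib.reload` re-executes the module, so `serviceconfig.ServiceConfigException` afterwards names a brand-new class object, and an exception created from a pre-reload reference will no longer match it. A minimal illustration of the pitfall (sketch only, not part of the release):

import importlib

import landscape.client.serviceconfig as serviceconfig

stale_exception_class = serviceconfig.ServiceConfigException
importlib.reload(serviceconfig)

try:
    raise stale_exception_class("raised via the pre-reload class")
except serviceconfig.ServiceConfigException:
    # Never reached: the reloaded module defines a different class object,
    # even though the name and module path are identical.
    pass
except Exception:
    # This is what actually catches it -- exactly the identity-comparison
    # surprise the docstring warns about.
    pass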
+ """ + + def test_import_systemd(self): + os.environ["SNAP_REVISION"] = "" + + importlib.reload(serviceconfig) + + self.assertEqual( + serviceconfig.ServiceConfig, + serviceconfig.SystemdConfig, + ) + + def test_import_snapd(self): + os.environ["SNAP_REVISION"] = "12345" + + importlib.reload(serviceconfig) + + self.assertEqual( + serviceconfig.ServiceConfig, + serviceconfig.SnapdConfig, + ) diff -Nru landscape-client-23.02/landscape/client/tests/test_service.py landscape-client-23.08/landscape/client/tests/test_service.py --- landscape-client-23.02/landscape/client/tests/test_service.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/test_service.py 2023-08-17 21:19:32.000000000 +0000 @@ -4,13 +4,13 @@ from twisted.internet import reactor from twisted.internet.task import deferLater -from landscape.lib.testing import FakeReactor from landscape.client.deployment import Configuration from landscape.client.service import LandscapeService from landscape.client.tests.helpers import LandscapeTest +from landscape.lib.testing import FakeReactor -class TestComponent(object): +class TestComponent: name = "monitor" @@ -19,9 +19,8 @@ class LandscapeServiceTest(LandscapeTest): - def setUp(self): - super(LandscapeServiceTest, self).setUp() + super().setUp() self.config = Configuration() self.config.data_path = self.makeDir() self.makeDir(path=self.config.sockets_path) @@ -29,7 +28,7 @@ signal.signal(signal.SIGUSR1, signal.SIG_DFL) def tearDown(self): - super(LandscapeServiceTest, self).tearDown() + super().tearDown() signal.signal(signal.SIGUSR1, signal.SIG_DFL) def test_create_persist(self): @@ -58,9 +57,11 @@ """ logging.getLogger().addHandler(logging.FileHandler(self.makeFile())) # Store the initial set of handlers - original_streams = [handler.stream for handler in - logging.getLogger().handlers if - isinstance(handler, logging.FileHandler)] + original_streams = [ + handler.stream + for handler in logging.getLogger().handlers + if isinstance(handler, logging.FileHandler) + ] # Instantiating LandscapeService should register the handler TestService(self.config) @@ -70,9 +71,11 @@ handler(None, None) def check(ign): - new_streams = [handler.stream for handler in - logging.getLogger().handlers if - isinstance(handler, logging.FileHandler)] + new_streams = [ + handler.stream + for handler in logging.getLogger().handlers + if isinstance(handler, logging.FileHandler) + ] for stream in new_streams: self.assertTrue(stream not in original_streams) diff -Nru landscape-client-23.02/landscape/client/tests/test_sysvconfig.py landscape-client-23.08/landscape/client/tests/test_sysvconfig.py --- landscape-client-23.02/landscape/client/tests/test_sysvconfig.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/test_sysvconfig.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,66 +0,0 @@ -import mock - -from landscape.client.tests.helpers import LandscapeTest - -from landscape.client.sysvconfig import SystemdConfig, ProcessError - - -class SystemdConfigTest(LandscapeTest): - - def setUp(self): - super(SystemdConfigTest, self).setUp() - patcher = mock.patch("landscape.client.sysvconfig.Popen") - self.mock_popen = patcher.start() - self.mock_popen.return_value.wait.return_value = 0 - self.addCleanup(patcher.stop) - - def test_set_to_run_on_boot(self): - serviceconfig = SystemdConfig() - serviceconfig.set_start_on_boot(True) - self.mock_popen.assert_called_once_with( - ["systemctl", "enable", "landscape-client.service"]) - - def 
test_set_to_not_run_on_boot(self): - serviceconfig = SystemdConfig() - serviceconfig.set_start_on_boot(False) - self.mock_popen.assert_called_once_with( - ["systemctl", "disable", "landscape-client.service"]) - - def test_configured_to_run(self): - serviceconfig = SystemdConfig() - self.assertTrue(serviceconfig.is_configured_to_run()) - self.mock_popen.assert_called_once_with( - ["systemctl", "is-enabled", "landscape-client.service"]) - - def test_not_configured_to_run(self): - self.mock_popen.return_value.wait.return_value = 1 - serviceconfig = SystemdConfig() - self.assertFalse(serviceconfig.is_configured_to_run()) - self.mock_popen.assert_called_once_with( - ["systemctl", "is-enabled", "landscape-client.service"]) - - def test_run_landscape(self): - serviceconfig = SystemdConfig() - serviceconfig.restart_landscape() - self.mock_popen.assert_called_once_with( - ["systemctl", "restart", "landscape-client.service"]) - - def test_run_landscape_with_error(self): - self.mock_popen.return_value.wait.return_value = 1 - serviceconfig = SystemdConfig() - self.assertRaises(ProcessError, serviceconfig.restart_landscape) - self.mock_popen.assert_called_once_with( - ["systemctl", "restart", "landscape-client.service"]) - - def test_stop_landscape(self): - serviceconfig = SystemdConfig() - serviceconfig.stop_landscape() - self.mock_popen.assert_called_once_with( - ["systemctl", "stop", "landscape-client.service"]) - - def test_stop_landscape_with_error(self): - self.mock_popen.return_value.wait.return_value = 1 - serviceconfig = SystemdConfig() - self.assertRaises(ProcessError, serviceconfig.stop_landscape) - self.mock_popen.assert_called_once_with( - ["systemctl", "stop", "landscape-client.service"]) diff -Nru landscape-client-23.02/landscape/client/tests/test_watchdog.py landscape-client-23.08/landscape/client/tests/test_watchdog.py --- landscape-client-23.02/landscape/client/tests/test_watchdog.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/tests/test_watchdog.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,35 +1,42 @@ -import stat -import time -import sys import os import signal +import stat +import sys +import time +from unittest import mock -import mock - -from twisted.internet.utils import getProcessOutput -from twisted.internet.defer import Deferred, succeed, fail from twisted.internet import reactor +from twisted.internet.defer import Deferred +from twisted.internet.defer import fail +from twisted.internet.defer import succeed +from twisted.internet.utils import getProcessOutput from twisted.python.fakepwd import UserDatabase -from landscape.lib.encoding import encode_values -from landscape.lib.fs import read_text_file -from landscape.lib.testing import EnvironSaverHelper -from landscape.client.tests.clock import Clock -from landscape.client.tests.helpers import ( - LandscapeTest, FakeBrokerServiceHelper) -from landscape.client.watchdog import ( - Daemon, WatchDog, WatchDogService, ExecutableNotFoundError, - WatchDogConfiguration, bootstrap_list, - MAXIMUM_CONSECUTIVE_RESTARTS, RESTART_BURST_DELAY, run, - Broker, Monitor, Manager) +import landscape.client.watchdog from landscape.client.amp import ComponentConnector from landscape.client.broker.amp import RemoteBrokerConnector from landscape.client.reactor import LandscapeReactor - -import landscape.client.watchdog +from landscape.client.tests.clock import Clock +from landscape.client.tests.helpers import FakeBrokerServiceHelper +from landscape.client.tests.helpers import LandscapeTest +from 
landscape.client.watchdog import bootstrap_list +from landscape.client.watchdog import Broker +from landscape.client.watchdog import Daemon +from landscape.client.watchdog import ExecutableNotFoundError +from landscape.client.watchdog import Manager +from landscape.client.watchdog import MAXIMUM_CONSECUTIVE_RESTARTS +from landscape.client.watchdog import Monitor +from landscape.client.watchdog import RESTART_BURST_DELAY +from landscape.client.watchdog import run +from landscape.client.watchdog import WatchDog +from landscape.client.watchdog import WatchDogConfiguration +from landscape.client.watchdog import WatchDogService +from landscape.lib.encoding import encode_values +from landscape.lib.fs import read_text_file +from landscape.lib.testing import EnvironSaverHelper -class StubDaemon(object): +class StubDaemon: program = "program-name" @@ -39,15 +46,18 @@ """ def setUp(self): - super(WatchDogTest, self).setUp() - self.broker_factory_patch = ( - mock.patch("landscape.client.watchdog.Broker")) + super().setUp() + self.broker_factory_patch = mock.patch( + "landscape.client.watchdog.Broker", + ) self.broker_factory = self.broker_factory_patch.start() - self.monitor_factory_patch = ( - mock.patch("landscape.client.watchdog.Monitor")) + self.monitor_factory_patch = mock.patch( + "landscape.client.watchdog.Monitor", + ) self.monitor_factory = self.monitor_factory_patch.start() - self.manager_factory_patch = ( - mock.patch("landscape.client.watchdog.Manager")) + self.manager_factory_patch = mock.patch( + "landscape.client.watchdog.Manager", + ) self.manager_factory = self.manager_factory_patch.start() self.config = WatchDogConfiguration() self.addCleanup(self.cleanup_mocks) @@ -70,11 +80,20 @@ def assert_daemons_mocks(self): self.broker_factory.assert_called_with( - mock.ANY, verbose=False, config=None) + mock.ANY, + verbose=False, + config=None, + ) self.monitor_factory.assert_called_with( - mock.ANY, verbose=False, config=None) + mock.ANY, + verbose=False, + config=None, + ) self.manager_factory.assert_called_with( - mock.ANY, verbose=False, config=None) + mock.ANY, + verbose=False, + config=None, + ) def setup_request_exit(self): self.broker.request_exit.return_value = succeed(True) @@ -101,12 +120,19 @@ self.setup_daemons_mocks() WatchDog( enabled_daemons=[self.broker_factory, self.monitor_factory], - config=self.config) + config=self.config, + ) self.broker_factory.assert_called_with( - mock.ANY, verbose=False, config=None) + mock.ANY, + verbose=False, + config=None, + ) self.monitor_factory.assert_called_with( - mock.ANY, verbose=False, config=None) + mock.ANY, + verbose=False, + config=None, + ) self.manager_factory.assert_not_called() def test_check_running_one(self): @@ -117,8 +143,10 @@ result = WatchDog(config=self.config).check_running() def got_result(r): - self.assertEqual([daemon.program for daemon in r], - ["landscape-broker"]) + self.assertEqual( + [daemon.program for daemon in r], + ["landscape-broker"], + ) self.assert_daemons_mocks() self.broker.is_running.assert_called_with() self.monitor.is_running.assert_called_with() @@ -134,9 +162,10 @@ result = WatchDog(config=self.config).check_running() def got_result(r): - self.assertEqual([daemon.program for daemon in r], - ["landscape-broker", "landscape-monitor", - "landscape-manager"]) + self.assertEqual( + [daemon.program for daemon in r], + ["landscape-broker", "landscape-monitor", "landscape-manager"], + ) self.assert_daemons_mocks() return result.addCallback(got_result) @@ -148,8 +177,10 @@ """ self.setup_daemons_mocks() 
self.broker.is_running.return_value = succeed(True) - result = WatchDog(enabled_daemons=[self.broker_factory], - config=self.config).check_running() + result = WatchDog( + enabled_daemons=[self.broker_factory], + config=self.config, + ).check_running() def got_result(r): self.assertEqual(len(r), 1) @@ -183,7 +214,10 @@ clock = Clock() dog = WatchDog( - clock, enabled_daemons=[self.broker_factory], config=self.config) + clock, + enabled_daemons=[self.broker_factory], + config=self.config, + ) dog.start() self.broker.start.assert_called_once_with() @@ -250,15 +284,15 @@ os.kill(os.getpid(), signal.SIGUSR1) mock_reactor.callFromThread.assert_called_once_with( - watchdog._notify_rotate_logs) + watchdog._notify_rotate_logs, + ) START = "start" STOP = "stop" -class BoringDaemon(object): - +class BoringDaemon: def __init__(self, program): self.program = program self.boots = [] @@ -294,7 +328,8 @@ self.pings += 1 if self.deferred is not None: raise AssertionError( - "is_running called while it's already running!") + "is_running called while it's already running!", + ) self.deferred = Deferred() return self.deferred @@ -304,13 +339,14 @@ class NonMockerWatchDogTests(LandscapeTest): - def test_ping_is_not_rescheduled_until_pings_complete(self): clock = Clock() - dog = WatchDog(clock, - broker=AsynchronousPingDaemon("test-broker"), - monitor=AsynchronousPingDaemon("test-monitor"), - manager=AsynchronousPingDaemon("test-manager")) + dog = WatchDog( + clock, + broker=AsynchronousPingDaemon("test-broker"), + monitor=AsynchronousPingDaemon("test-monitor"), + manager=AsynchronousPingDaemon("test-manager"), + ) dog.start_monitoring() @@ -331,10 +367,12 @@ checks fail, the daemon will be restarted. """ clock = Clock() - dog = WatchDog(clock, - broker=AsynchronousPingDaemon("test-broker"), - monitor=AsynchronousPingDaemon("test-monitor"), - manager=AsynchronousPingDaemon("test-manager")) + dog = WatchDog( + clock, + broker=AsynchronousPingDaemon("test-broker"), + monitor=AsynchronousPingDaemon("test-monitor"), + manager=AsynchronousPingDaemon("test-manager"), + ) dog.start_monitoring() for i in range(4): @@ -356,10 +394,12 @@ require 5 more ping failures to restart the daemon. """ clock = Clock() - dog = WatchDog(clock, - broker=AsynchronousPingDaemon("test-broker"), - monitor=AsynchronousPingDaemon("test-monitor"), - manager=AsynchronousPingDaemon("test-manager")) + dog = WatchDog( + clock, + broker=AsynchronousPingDaemon("test-broker"), + monitor=AsynchronousPingDaemon("test-monitor"), + manager=AsynchronousPingDaemon("test-manager"), + ) dog.start_monitoring() clock.advance(5) @@ -392,10 +432,12 @@ cause the scheduled call to exist but already fired. """ clock = Clock() - dog = WatchDog(clock, - broker=BoringDaemon("test-broker"), - monitor=BoringDaemon("test-monitor"), - manager=AsynchronousPingDaemon("test-manager")) + dog = WatchDog( + clock, + broker=BoringDaemon("test-broker"), + monitor=BoringDaemon("test-monitor"), + manager=AsynchronousPingDaemon("test-manager"), + ) dog.start_monitoring() clock.advance(5) return dog.request_exit() @@ -406,10 +448,12 @@ not be restarted until the process has fully died. 
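The assertions above pin down the ping policy: each daemon is pinged every 5 seconds, a restart is triggered only after 5 consecutive failed pings, and the failure counter resets on success or after a restart. A hedged sketch of that bookkeeping (illustrative only, not the actual landscape.client.watchdog implementation):

PING_INTERVAL = 5  # seconds between is_running() pings, per the Clock advances
MAX_PING_FAILURES = 5  # consecutive failures tolerated before restarting


def handle_ping_result(daemon, is_running, failures):
    """Return the updated consecutive-failure count for one daemon."""
    if is_running:
        return 0  # any successful ping resets the counter
    failures += 1
    if failures < MAX_PING_FAILURES:
        return failures
    # Restart only once the stop Deferred fires, so a daemon that is being
    # restarted is never pinged or restarted a second time concurrently.
    daemon.stop().addCallback(lambda _: daemon.start())
    return 0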
""" clock = Clock() - dog = WatchDog(clock, - broker=AsynchronousPingDaemon("test-broker"), - monitor=BoringDaemon("test-monitor"), - manager=BoringDaemon("test-manager")) + dog = WatchDog( + clock, + broker=AsynchronousPingDaemon("test-broker"), + monitor=BoringDaemon("test-monitor"), + manager=BoringDaemon("test-manager"), + ) stop_result = Deferred() dog.broker.stop = lambda: stop_result dog.start_monitoring() @@ -428,10 +472,12 @@ pinged until after the restart completes. """ clock = Clock() - dog = WatchDog(clock, - broker=AsynchronousPingDaemon("test-broker"), - monitor=BoringDaemon("test-monitor"), - manager=BoringDaemon("test-manager")) + dog = WatchDog( + clock, + broker=AsynchronousPingDaemon("test-broker"), + monitor=BoringDaemon("test-monitor"), + manager=BoringDaemon("test-manager"), + ) stop_result = Deferred() dog.broker.stop = lambda: stop_result dog.start_monitoring() @@ -456,10 +502,12 @@ again. """ clock = Clock() - dog = WatchDog(clock, - broker=AsynchronousPingDaemon("test-broker"), - monitor=BoringDaemon("test-monitor"), - manager=BoringDaemon("test-manager")) + dog = WatchDog( + clock, + broker=AsynchronousPingDaemon("test-broker"), + monitor=BoringDaemon("test-monitor"), + manager=BoringDaemon("test-manager"), + ) dog.start_monitoring() for i in range(5): @@ -482,12 +530,15 @@ """ self.log_helper.ignore_errors( "Couldn't request that broker gracefully shut down; " - "killing forcefully.") + "killing forcefully.", + ) clock = Clock() - dog = WatchDog(clock, - broker=BoringDaemon("test-broker"), - monitor=BoringDaemon("test-monitor"), - manager=BoringDaemon("test-manager")) + dog = WatchDog( + clock, + broker=BoringDaemon("test-broker"), + monitor=BoringDaemon("test-monitor"), + manager=BoringDaemon("test-manager"), + ) # request_exit returns False when there's no broker, as tested by # DaemonTest.test_request_exit_without_broker @@ -501,13 +552,14 @@ def stop(): manager_result.callback(True) return succeed(True) + dog.manager.stop = stop result = dog.request_exit() return result -class StubBroker(object): +class StubBroker: name = "broker" @@ -524,7 +576,7 @@ EXEC_NAME = "landscape-broker" def setUp(self): - super(DaemonTestBase, self).setUp() + super().setUp() if hasattr(self, "broker_service"): # DaemonBrokerTest @@ -536,30 +588,34 @@ self.config.data_path = self.makeDir() self.makeDir(path=self.config.sockets_path) - self.connector = self.connector_factory(LandscapeReactor(), - self.config) + self.connector = self.connector_factory( + LandscapeReactor(), + self.config, + ) self.daemon = self.get_daemon() def tearDown(self): if hasattr(self, "broker_service"): # DaemonBrokerTest self.broker_service.stopService() - super(DaemonTestBase, self).tearDown() + super().tearDown() def get_daemon(self, **kwargs): - if 'username' in kwargs: + if "username" in kwargs: + class MyDaemon(Daemon): - username = kwargs.pop('username') + username = kwargs.pop("username") + + daemon = MyDaemon(self.connector, **kwargs) + else: - MyDaemon = Daemon - daemon = MyDaemon(self.connector, **kwargs) + daemon = Daemon(self.connector, **kwargs) daemon.program = self.EXEC_NAME daemon.factor = 0.01 return daemon -class FileChangeWaiter(object): - +class FileChangeWaiter: def __init__(self, filename): os.utime(filename, (0, 0)) self._mtime = os.path.getmtime(filename) @@ -572,11 +628,11 @@ time.sleep(0.1) if timeout and time.time() > end: raise RuntimeError( - "timed out after {} seconds".format(timeout)) + f"timed out after {timeout} seconds", + ) class DaemonTest(DaemonTestBase): - def 
_write_script(self, content): filename = self.write_script(self.config, self.EXEC_NAME, content) self.daemon.BIN_DIR = self.config.bindir @@ -596,8 +652,7 @@ def test_start_process(self): output_filename = self.makeFile("NOT RUN") - self._write_script( - '#!/bin/sh\necho "RUN $@" > %s' % output_filename) + self._write_script(f'#!/bin/sh\necho "RUN $@" > {output_filename}') waiter = FileChangeWaiter(output_filename) @@ -605,15 +660,16 @@ waiter.wait() - self.assertEqual(open(output_filename).read(), - "RUN --ignore-sigint --quiet\n") + self.assertEqual( + open(output_filename).read(), + "RUN --ignore-sigint --quiet\n", + ) return self.daemon.stop() def test_start_process_with_verbose(self): output_filename = self.makeFile("NOT RUN") - self._write_script( - '#!/bin/sh\necho "RUN $@" > %s' % output_filename) + self._write_script(f'#!/bin/sh\necho "RUN $@" > {output_filename}') waiter = FileChangeWaiter(output_filename) @@ -623,8 +679,7 @@ waiter.wait(timeout=10) - self.assertEqual(open(output_filename).read(), - "RUN --ignore-sigint\n") + self.assertEqual(open(output_filename).read(), "RUN --ignore-sigint\n") return daemon.stop() @@ -632,13 +687,15 @@ """The stop() method sends SIGTERM to the subprocess.""" output_filename = self.makeFile("NOT RUN") self._write_script( - ("#!%s\n" - "import time\n" - "file = open(%r, 'w')\n" - "file.write('RUN')\n" - "file.close()\n" - "time.sleep(1000)\n" - ) % (sys.executable, output_filename)) + ( + f"#!{sys.executable}\n" + "import time\n" + f"file = open({output_filename!r}, 'w')\n" + "file.write('RUN')\n" + "file.close()\n" + "time.sleep(1000)\n" + ), + ) waiter = FileChangeWaiter(output_filename) self.daemon.start() @@ -654,17 +711,23 @@ """ output_filename = self.makeFile("NOT RUN") self._write_script( - ("#!%s\n" - "import signal, os\n" - "signal.signal(signal.SIGTERM, signal.SIG_IGN)\n" - "file = open(%r, 'w')\n" - "file.write('RUN')\n" - "file.close()\n" - "os.kill(os.getpid(), signal.SIGSTOP)\n" - ) % (sys.executable, output_filename)) - - self.addCleanup(setattr, landscape.client.watchdog, "SIGKILL_DELAY", - landscape.client.watchdog.SIGKILL_DELAY) + ( + f"#!{sys.executable}\n" + "import signal, os\n" + "signal.signal(signal.SIGTERM, signal.SIG_IGN)\n" + f"file = open({output_filename!r}, 'w')\n" + "file.write('RUN')\n" + "file.close()\n" + "os.kill(os.getpid(), signal.SIGSTOP)\n" + ), + ) + + self.addCleanup( + setattr, + landscape.client.watchdog, + "SIGKILL_DELAY", + landscape.client.watchdog.SIGKILL_DELAY, + ) landscape.client.watchdog.SIGKILL_DELAY = 1 waiter = FileChangeWaiter(output_filename) @@ -679,13 +742,13 @@ died. """ output_filename = self.makeFile("NOT RUN") - self._write_script( - '#!/bin/sh\necho "RUN" > %s' % output_filename) + self._write_script(f'#!/bin/sh\necho "RUN" > {output_filename}') self.daemon.start() def got_result(result): self.assertEqual(open(output_filename).read(), "RUN\n") + return self.daemon.wait().addCallback(got_result) def test_wait_or_die_dies_happily(self): @@ -694,42 +757,47 @@ certain amount of time, just like C{wait}. 
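Read together, test_wait_or_die_dies_happily, test_wait_or_die_terminates, and test_wait_or_die_kills specify a three-stage shutdown: give the process GRACEFUL_WAIT_PERIOD to exit on its own, then send SIGTERM, and finally SIGKILL after SIGKILL_DELAY if SIGTERM is ignored. A self-contained, blocking sketch of that escalation (the real code is Deferred-based and lives in landscape.client.watchdog):

import os
import signal
import time


def wait_or_die_sketch(pid, graceful_wait_period=1.0, sigkill_delay=1.0):
    if _wait_for_exit(pid, graceful_wait_period):
        return  # the process exited happily on its own
    os.kill(pid, signal.SIGTERM)  # ask politely first
    if _wait_for_exit(pid, sigkill_delay):
        return  # SIGTERM was honoured
    os.kill(pid, signal.SIGKILL)  # SIGTERM was ignored; force the issue


def _wait_for_exit(pid, timeout):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            os.kill(pid, 0)  # signal 0 only probes whether the pid exists
        except ProcessLookupError:
            return True
        time.sleep(0.05)
    return False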
""" output_filename = self.makeFile("NOT RUN") - self._write_script( - '#!/bin/sh\necho "RUN" > %s' % output_filename) + self._write_script(f'#!/bin/sh\necho "RUN" > {output_filename}') self.daemon.start() def got_result(result): self.assertEqual(open(output_filename).read(), "RUN\n") + return self.daemon.wait_or_die().addCallback(got_result) def test_wait_or_die_terminates(self): """wait_or_die eventually terminates the process.""" output_filename = self.makeFile("NOT RUN") self._write_script( - """\ -#!%(exe)s + f"""\ +#!{sys.executable} import time import signal -file = open(%(out)r, 'w') +file = open({output_filename!r}, 'w') file.write('unsignalled') file.close() def term(frame, sig): - file = open(%(out)r, 'w') + file = open({output_filename!r}, 'w') file.write('TERMINATED') file.close() signal.signal(signal.SIGTERM, term) time.sleep(999) - """ % {"exe": sys.executable, "out": output_filename}) + """, + ) - self.addCleanup(setattr, - landscape.client.watchdog, "GRACEFUL_WAIT_PERIOD", - landscape.client.watchdog.GRACEFUL_WAIT_PERIOD) + self.addCleanup( + setattr, + landscape.client.watchdog, + "GRACEFUL_WAIT_PERIOD", + landscape.client.watchdog.GRACEFUL_WAIT_PERIOD, + ) landscape.client.watchdog.GRACEFUL_WAIT_PERIOD = 0.2 self.daemon.start() def got_result(result): self.assertEqual(open(output_filename).read(), "TERMINATED") + return self.daemon.wait_or_die().addCallback(got_result) def test_wait_or_die_kills(self): @@ -739,21 +807,29 @@ """ output_filename = self.makeFile("NOT RUN") self._write_script( - ("#!%s\n" - "import signal, os\n" - "signal.signal(signal.SIGTERM, signal.SIG_IGN)\n" - "file = open(%r, 'w')\n" - "file.write('RUN')\n" - "file.close()\n" - "os.kill(os.getpid(), signal.SIGSTOP)\n" - ) % (sys.executable, output_filename)) - - self.addCleanup(setattr, - landscape.client.watchdog, "SIGKILL_DELAY", - landscape.client.watchdog.SIGKILL_DELAY) - self.addCleanup(setattr, - landscape.client.watchdog, "GRACEFUL_WAIT_PERIOD", - landscape.client.watchdog.GRACEFUL_WAIT_PERIOD) + ( + f"#!{sys.executable}\n" + "import signal, os\n" + "signal.signal(signal.SIGTERM, signal.SIG_IGN)\n" + f"file = open({output_filename!r}, 'w')\n" + "file.write('RUN')\n" + "file.close()\n" + "os.kill(os.getpid(), signal.SIGSTOP)\n" + ), + ) + + self.addCleanup( + setattr, + landscape.client.watchdog, + "SIGKILL_DELAY", + landscape.client.watchdog.SIGKILL_DELAY, + ) + self.addCleanup( + setattr, + landscape.client.watchdog, + "GRACEFUL_WAIT_PERIOD", + landscape.client.watchdog.GRACEFUL_WAIT_PERIOD, + ) landscape.client.watchdog.GRACEFUL_WAIT_PERIOD = 1 landscape.client.watchdog.SIGKILL_DELAY = 1 @@ -795,20 +871,24 @@ """ stop = [] stopped = [] - self.log_helper.ignore_errors("Can't keep landscape-broker running. " - "Exiting.") + self.log_helper.ignore_errors( + "Can't keep landscape-broker running. " "Exiting.", + ) output_filename = self.makeFile("NOT RUN") - self._write_script( - "#!/bin/sh\necho RUN >> %s" % output_filename) + self._write_script(f"#!/bin/sh\necho RUN >> {output_filename}") def got_result(result): - self.assertEqual(len(list(open(output_filename))), - MAXIMUM_CONSECUTIVE_RESTARTS) - - self.assertTrue("Can't keep landscape-broker running." in - self.logfile.getvalue()) + self.assertEqual( + len(list(open(output_filename))), + MAXIMUM_CONSECUTIVE_RESTARTS, + ) + + self.assertTrue( + "Can't keep landscape-broker running." 
+ in self.logfile.getvalue(), + ) self.assertCountEqual([True], stopped) reactor.stop = stop[0] @@ -836,23 +916,27 @@ # This test hacks the first time() call to make it return a timestamp # that happened a while ago, and so give the impression that some time # has passed and it's fine to restart more times again. - self.log_helper.ignore_errors("Can't keep landscape-broker running. " - "Exiting.") + self.log_helper.ignore_errors( + "Can't keep landscape-broker running. Exiting.", + ) stop = [] stopped = [] output_filename = self.makeFile("NOT RUN") - self._write_script( - "#!/bin/sh\necho RUN >> %s" % output_filename) + self._write_script(f"#!/bin/sh\necho RUN >> {output_filename}") def got_result(result): # Pay attention to the +1 below. It's the reason for this test. - self.assertEqual(len(list(open(output_filename))), - MAXIMUM_CONSECUTIVE_RESTARTS + 1) - - self.assertTrue("Can't keep landscape-broker running." in - self.logfile.getvalue()) + self.assertEqual( + len(list(open(output_filename))), + MAXIMUM_CONSECUTIVE_RESTARTS + 1, + ) + + self.assertTrue( + "Can't keep landscape-broker running." + in self.logfile.getvalue(), + ) self.assertCountEqual([True], stopped) reactor.stop = stop[0] @@ -868,8 +952,12 @@ before.append(True) return original_time() - RESTART_BURST_DELAY return original_time() + time_patcher = mock.patch.object( + time, + "time", + side_effect=time_sideeffect, + ) time_patcher.start() self.addCleanup(time_patcher.stop) @@ -904,12 +992,12 @@ """ self._write_script("#!/bin/sh") - class getpwnam_result: + class GetPwNamResult: pw_uid = 123 pw_gid = 456 pw_dir = "/var/lib/landscape" - getpwnam.return_value = getpwnam_result() + getpwnam.return_value = GetPwNamResult() reactor = mock.Mock() @@ -930,7 +1018,13 @@ env = encode_values(env) reactor.spawnProcess.assert_called_with( - mock.ANY, mock.ANY, args=mock.ANY, env=env, uid=123, gid=456) + mock.ANY, + mock.ANY, + args=mock.ANY, + env=env, + uid=123, + gid=456, + ) @mock.patch("os.getuid", return_value=555) def test_spawn_process_without_root(self, mock_getuid): @@ -946,8 +1040,13 @@ daemon.start() reactor.spawnProcess.assert_called_with( - mock.ANY, mock.ANY, args=mock.ANY, env=mock.ANY, uid=None, - gid=None) + mock.ANY, + mock.ANY, + args=mock.ANY, + env=mock.ANY, + uid=None, + gid=None, + ) @mock.patch("os.getgid", return_value=0) @mock.patch("os.getuid", return_value=0) @@ -964,24 +1063,35 @@ daemon.start() reactor.spawnProcess.assert_called_with( - mock.ANY, mock.ANY, args=mock.ANY, env=mock.ANY, uid=None, - gid=None) + mock.ANY, + mock.ANY, + args=mock.ANY, + env=mock.ANY, + uid=None, + gid=None, + ) def test_request_exit(self): """The request_exit() method calls exit() on the broker process.""" output_filename = self.makeFile("NOT CALLED") socket_filename = os.path.join(self.config.sockets_path, "broker.sock") - broker_filename = self.makeFile(STUB_BROKER % - {"executable": sys.executable, - "path": sys.path, - "output_filename": output_filename, - "socket": socket_filename}) + broker_filename = self.makeFile( + STUB_BROKER.format( + executable=sys.executable, + path=sys.path, + output_filename=output_filename, + socket=socket_filename, + ), + ) os.chmod(broker_filename, 0o755) env = encode_values(os.environ) - process_result = getProcessOutput(broker_filename, env=env, - errortoo=True) + process_result = getProcessOutput( + broker_filename, + env=env, + errortoo=True, + ) # Wait until the process starts up.
This can take a few seconds # depending on io, so keep trying the call a few times. @@ -1020,9 +1130,8 @@ class WatchDogOptionsTest(LandscapeTest): - def setUp(self): - super(WatchDogOptionsTest, self).setUp() + super().setUp() self.config = WatchDogConfiguration() self.config.default_config_filenames = [] @@ -1044,26 +1153,33 @@ def test_monitor_only(self): self.config.load(["--monitor-only"]) - self.assertEqual(self.config.get_enabled_daemons(), - [Broker, Monitor]) + self.assertEqual(self.config.get_enabled_daemons(), [Broker, Monitor]) def test_default_daemons(self): self.config.load([]) - self.assertEqual(self.config.get_enabled_daemons(), - [Broker, Monitor, Manager]) + self.assertEqual( + self.config.get_enabled_daemons(), + [Broker, Monitor, Manager], + ) class WatchDogServiceTest(LandscapeTest): - def setUp(self): - super(WatchDogServiceTest, self).setUp() + super().setUp() self.configuration = WatchDogConfiguration() self.data_path = self.makeDir() self.log_dir = self.makeDir() self.config_filename = self.makeFile("[client]\n") - self.configuration.load(["--config", self.config_filename, - "--data-path", self.data_path, - "--log-dir", self.log_dir]) + self.configuration.load( + [ + "--config", + self.config_filename, + "--data-path", + self.data_path, + "--log-dir", + self.log_dir, + ], + ) @mock.patch("landscape.client.watchdog.daemonize") @mock.patch("landscape.client.watchdog.WatchDog") @@ -1088,7 +1204,7 @@ service = WatchDogService(self.configuration) service.startService() - self.assertEqual(int(open(pid_file, "r").read()), os.getpid()) + self.assertEqual(int(open(pid_file).read()), os.getpid()) mock_watchdog().check_running.assert_called_once_with() mock_watchdog().start.assert_called_once_with() mock_daemonize.assert_called_once_with() @@ -1097,7 +1213,11 @@ @mock.patch("landscape.client.watchdog.daemonize") @mock.patch("landscape.client.watchdog.WatchDog") def test_dont_write_pid_file_until_we_really_start( - self, mock_watchdog, mock_daemonize, mock_reactor): + self, + mock_watchdog, + mock_daemonize, + mock_reactor, + ): """ If the client can't be started because another client is still running, the client shouldn't be daemonized and the pid file shouldn't be @@ -1106,7 +1226,8 @@ mock_watchdog().check_running.return_value = succeed([StubDaemon()]) mock_reactor.crash.return_value = None self.log_helper.ignore_errors( - "ERROR: The following daemons are already running: program-name") + "ERROR: The following daemons are already running: program-name", + ) pid_file = self.makeFile() self.configuration.daemon = True @@ -1159,7 +1280,10 @@ @mock.patch("landscape.client.watchdog.os.access", return_value=False) @mock.patch("landscape.client.watchdog.WatchDog") def test_remove_pid_file_doesnt_explode_on_inaccessibility( - self, mock_watchdog, mock_access): + self, + mock_watchdog, + mock_access, + ): mock_watchdog().request_exit.return_value = succeed(None) pid_file = self.makeFile() @@ -1169,15 +1293,18 @@ service.stopService() self.assertTrue(mock_watchdog().request_exit.called) self.assertTrue(os.path.exists(pid_file)) - mock_access.assert_called_once_with( - pid_file, os.W_OK) + mock_access.assert_called_once_with(pid_file, os.W_OK) @mock.patch("landscape.client.watchdog.reactor") @mock.patch("landscape.client.watchdog.bootstrap_list") def test_start_service_exits_when_already_running( - self, mock_bootstrap_list, mock_reactor): + self, + mock_bootstrap_list, + mock_reactor, + ): self.log_helper.ignore_errors( - "ERROR: The following daemons are already running: 
program-name") + "ERROR: The following daemons are already running: program-name", + ) service = WatchDogService(self.configuration) service.watchdog = mock.Mock() @@ -1185,7 +1312,9 @@ result = service.startService() self.assertEqual(service.exit_code, 1) mock_bootstrap_list.bootstrap.assert_called_once_with( - data_path=self.data_path, log_dir=self.log_dir) + data_path=self.data_path, + log_dir=self.log_dir, + ) service.watchdog.check_running.assert_called_once_with() self.assertTrue(mock_reactor.crash.called) return result @@ -1193,7 +1322,10 @@ @mock.patch("landscape.client.watchdog.reactor") @mock.patch("landscape.client.watchdog.bootstrap_list") def test_start_service_exits_when_unknown_errors_occur( - self, mock_bootstrap_list, mock_reactor): + self, + mock_bootstrap_list, + mock_reactor, + ): self.log_helper.ignore_errors(ZeroDivisionError) service = WatchDogService(self.configuration) @@ -1205,7 +1337,9 @@ result = service.startService() self.assertEqual(service.exit_code, 2) mock_bootstrap_list.bootstrap.assert_called_once_with( - data_path=self.data_path, log_dir=self.log_dir) + data_path=self.data_path, + log_dir=self.log_dir, + ) service.watchdog.check_running.assert_called_once_with() service.watchdog.start.assert_called_once_with() mock_reactor.crash.assert_called_once_with() @@ -1223,23 +1357,25 @@ mock_getgrnam("root").gr_gid = 5678 with mock.patch("landscape.lib.bootstrap.pwd", new=fake_pwd): - bootstrap_list.bootstrap(data_path=data_path, - log_dir=log_dir) + bootstrap_list.bootstrap(data_path=data_path, log_dir=log_dir) def path(*suffix): return os.path.join(data_path, *suffix) - paths = ["package", - "package/hash-id", - "package/binaries", - "package/upgrade-tool", - "messages", - "sockets", - "custom-graph-scripts", - log_dir, - "package/database"] - calls = [mock.call(path(path_comps), 1234, 5678) - for path_comps in paths] + paths = [ + "package", + "package/hash-id", + "package/binaries", + "package/upgrade-tool", + "messages", + "sockets", + "custom-graph-scripts", + log_dir, + "package/database", + ] + calls = [ + mock.call(path(path_comps), 1234, 5678) for path_comps in paths + ] mock_chown.assert_has_calls([mock.call(path(), 1234, 5678)] + calls) self.assertTrue(os.path.isdir(path())) self.assertTrue(os.path.isdir(path("package"))) @@ -1262,7 +1398,7 @@ STUB_BROKER = """\ -#!%(executable)s +#!{executable} import sys import warnings @@ -1271,7 +1407,7 @@ from twisted.internet import reactor -sys.path = %(path)r +sys.path = {path!r} from landscape.lib.amp import MethodCallServerFactory from landscape.client.broker.server import BrokerServer @@ -1280,7 +1416,7 @@ class StubBroker(object): def exit(self): - file = open(%(output_filename)r, "w") + file = open({output_filename!r}, "w") file.write("CALLED") file.close() reactor.callLater(1, reactor.stop) @@ -1288,7 +1424,7 @@ stub_broker = StubBroker() methods = get_remote_methods(BrokerServer) factory = MethodCallServerFactory(stub_broker, methods) -reactor.listenUNIX(%(socket)r, factory) +reactor.listenUNIX({socket!r}, factory) reactor.run() """ @@ -1305,7 +1441,7 @@ helpers = [EnvironSaverHelper] def setUp(self): - super(WatchDogRunTests, self).setUp() + super().setUp() self.fake_pwd = UserDatabase() @mock.patch("os.getuid", return_value=1000) @@ -1314,19 +1450,27 @@ The watchdog should print an error message and exit if run by a normal user. 
""" - self.fake_pwd.addUser( - "landscape", None, 1001, None, None, None, None) + self.fake_pwd.addUser("landscape", None, 1001, None, None, None, None) with mock.patch("landscape.client.watchdog.pwd", new=self.fake_pwd): sys_exit = self.assertRaises(SystemExit, run, ["landscape-client"]) - self.assertIn("landscape-client can only be run" - " as 'root' or 'landscape'.", str(sys_exit)) + self.assertIn( + "landscape-client can only be run as 'root' or 'landscape'.", + str(sys_exit), + ) def test_landscape_user(self): """ The watchdog *can* be run as the 'landscape' user. """ self.fake_pwd.addUser( - "landscape", None, os.getuid(), None, None, None, None) + "landscape", + None, + os.getuid(), + None, + None, + None, + None, + ) reactor = FakeReactor() with mock.patch("landscape.client.watchdog.pwd", new=self.fake_pwd): run(["--log-dir", self.makeDir()], reactor=reactor) @@ -1343,7 +1487,14 @@ def test_clean_environment(self): self.fake_pwd.addUser( - "landscape", None, os.getuid(), None, None, None, None) + "landscape", + None, + os.getuid(), + None, + None, + None, + None, + ) os.environ["DEBIAN_YO"] = "yo" os.environ["DEBCONF_YO"] = "yo" os.environ["LANDSCAPE_ATTACHMENTS"] = "some attachments" diff -Nru landscape-client-23.02/landscape/client/upgraders/__init__.py landscape-client-23.08/landscape/client/upgraders/__init__.py --- landscape-client-23.02/landscape/client/upgraders/__init__.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/upgraders/__init__.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,11 @@ -from landscape.client.upgraders import broker, monitor, package +from landscape.client.upgraders import broker +from landscape.client.upgraders import monitor +from landscape.client.upgraders import package UPGRADE_MANAGERS = { # these should not be hardcoded "broker": broker.upgrade_manager, "monitor": monitor.upgrade_manager, - "package": package.upgrade_manager} + "package": package.upgrade_manager, +} diff -Nru landscape-client-23.02/landscape/client/upgraders/tests/test_broker.py landscape-client-23.08/landscape/client/upgraders/tests/test_broker.py --- landscape-client-23.02/landscape/client/upgraders/tests/test_broker.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/upgraders/tests/test_broker.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,10 +1,8 @@ -from landscape.client.tests.helpers import LandscapeTest from landscape.client.patch import UpgradeManager - +from landscape.client.tests.helpers import LandscapeTest from landscape.client.upgraders import broker class TestBrokerUpgraders(LandscapeTest): - def test_broker_upgrade_manager(self): self.assertEqual(type(broker.upgrade_manager), UpgradeManager) diff -Nru landscape-client-23.02/landscape/client/upgraders/tests/test_monitor.py landscape-client-23.08/landscape/client/upgraders/tests/test_monitor.py --- landscape-client-23.02/landscape/client/upgraders/tests/test_monitor.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/upgraders/tests/test_monitor.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,10 +1,8 @@ -from landscape.client.tests.helpers import LandscapeTest from landscape.client.patch import UpgradeManager - +from landscape.client.tests.helpers import LandscapeTest from landscape.client.upgraders import monitor class TestMonitorUpgraders(LandscapeTest): - def test_monitor_upgrade_manager(self): self.assertEqual(type(monitor.upgrade_manager), UpgradeManager) diff -Nru 
landscape-client-23.02/landscape/client/upgraders/tests/test_package.py landscape-client-23.08/landscape/client/upgraders/tests/test_package.py --- landscape-client-23.02/landscape/client/upgraders/tests/test_package.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/upgraders/tests/test_package.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,10 +1,8 @@ -from landscape.client.tests.helpers import LandscapeTest from landscape.client.patch import SQLiteUpgradeManager - +from landscape.client.tests.helpers import LandscapeTest from landscape.client.upgraders import package class TestPackageUpgraders(LandscapeTest): - def test_package_upgrade_manager(self): self.assertEqual(type(package.upgrade_manager), SQLiteUpgradeManager) diff -Nru landscape-client-23.02/landscape/client/user/changes.py landscape-client-23.08/landscape/client/user/changes.py --- landscape-client-23.02/landscape/client/user/changes.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/user/changes.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,9 +1,10 @@ -from twisted.python.compat import iteritems, itervalues +from twisted.python.compat import iteritems +from twisted.python.compat import itervalues from landscape.client.diff import diff -class UserChanges(object): +class UserChanges: """Detect changes made since the last snapshot was taken. If no snapshot is available all users and groups are reported. @@ -12,7 +13,7 @@ """ def __init__(self, persist, provider): - super(UserChanges, self).__init__() + super().__init__() self._persist = persist self._provider = provider # FIXME This shouldn't really be necessary. Not having it @@ -27,9 +28,13 @@ self._old_users = self._persist.get("users", {}) self._old_groups = self._persist.get("groups", {}) self._new_users = self._create_index( - "username", self._provider.get_users()) + "username", + self._provider.get_users(), + ) self._new_groups = self._create_index( - "name", self._provider.get_groups()) + "name", + self._provider.get_groups(), + ) def snapshot(self): """Save the current state and use it as a comparison snapshot.""" @@ -123,8 +128,9 @@ if removed: remove_members[groupname] = sorted(removed) if old_data["gid"] != new_data["gid"]: - update_groups.append({"name": groupname, - "gid": new_data["gid"]}) + update_groups.append( + {"name": groupname, "gid": new_data["gid"]}, + ) if create_members: members = changes.setdefault("create-group-members", {}) members.update(create_members) diff -Nru landscape-client-23.02/landscape/client/user/management.py landscape-client-23.08/landscape/client/user/management.py --- landscape-client-23.02/landscape/client/user/management.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/user/management.py 2023-08-17 21:19:32.000000000 +0000 @@ -3,62 +3,90 @@ # API, with thorough usage of exceptions and such, instead of pipes to # subprocesses. liboobs (i.e. System Tools) is a possibility, and has # documentation now in the 2.17 series, but is not wrapped to Python. 
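UserManagement, defined below, drives the classic command-line tools (adduser, chpasswd, usermod, deluser, gpasswd, and friends) through pipes rather than a programmatic API. A hypothetical invocation following the add_user signature in this hunk; all argument values are invented for illustration, and the call needs the privileges to run adduser:

from landscape.client.user.management import UserManagement

management = UserManagement()
management.add_user(
    "jdoe",  # username passed to adduser
    "John Doe",  # full name, first field of the GECOS string
    "secret",  # password, fed to chpasswd over a pipe
    True,  # require_password_reset: expires it via `passwd jdoe -e`
    "users",  # primary_group_name, resolved to a GID by the provider
    "Office 1",  # location (GECOS)
    "555-0100",  # work_phone (GECOS)
    "555-0199",  # home_phone (GECOS)
)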
- import logging import subprocess -from landscape.client.user.provider import UserManagementError, UserProvider +from landscape.client.user.provider import UserManagementError +from landscape.client.user.provider import UserProvider -class UserManagement(object): +class UserManagement: """Manage system users and groups.""" def __init__(self, provider=None): self._provider = provider or UserProvider() - def add_user(self, username, name, password, require_password_reset, - primary_group_name, location, work_phone, home_phone): + def add_user( + self, + username, + name, + password, + require_password_reset, + primary_group_name, + location, + work_phone, + home_phone, + ): """Add C{username} to the computer. @raises UserManagementError: Raised when C{adduser} fails. @raises UserManagementError: Raised when C{passwd} fails. """ logging.info("Adding user %s.", username) - gecos = "%s,%s,%s,%s" % (name, location or "", work_phone or "", - home_phone or "") - command = ["adduser", username, "--disabled-password", "--gecos", - gecos] + gecos = "{},{},{},{}".format( + name, + location or "", + work_phone or "", + home_phone or "", + ) + command = [ + "adduser", + username, + "--disabled-password", + "--gecos", + gecos, + ] if primary_group_name: - command.extend(["--gid", str(self._provider.get_gid( - primary_group_name))]) + command.extend( + ["--gid", str(self._provider.get_gid(primary_group_name))], + ) result, output = self.call_popen(command) if result != 0: - raise UserManagementError("Error adding user %s.\n%s" % - (username, output)) + raise UserManagementError( + f"Error adding user {username}.\n{output}", + ) self._set_password(username, password) if require_password_reset: result, new_output = self.call_popen(["passwd", username, "-e"]) if result != 0: - raise UserManagementError("Error resetting password for user " - "%s.\n%s" % (username, new_output)) + raise UserManagementError( + "Error resetting password for user " + f"{username}.\n{new_output}", + ) else: output += new_output return output def _set_password(self, username, password): - chpasswd_input = "{}:{}".format(username, password).encode("utf-8") - chpasswd = self._provider.popen(["chpasswd"], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + chpasswd_input = f"{username}:{password}".encode("utf-8") + chpasswd = self._provider.popen( + ["chpasswd"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) output, stderr = chpasswd.communicate(chpasswd_input) result = chpasswd.returncode if result != 0: username = username.encode("utf-8") raise UserManagementError( "Error setting password for user {}.\n{} {}".format( - username, output, stderr)) + username, + output, + stderr, + ), + ) return output def _set_primary_group(self, username, groupname): @@ -66,14 +94,22 @@ command = ["usermod", "-g", str(primary_gid), username] result, output = self.call_popen(command) if result != 0: - raise UserManagementError("Error setting primary group to %d for" - "%s.\n%s" % (primary_gid, username, - output)) + raise UserManagementError( + f"Error setting primary group to {primary_gid:d} for " + f"{username}.\n{output}", + ) return output - def set_user_details(self, username, password=None, name=None, - location=None, work_number=None, home_number=None, - primary_group_name=None): + def set_user_details( + self, + username, + password=None, + name=None, + location=None, + work_number=None, + home_number=None, + primary_group_name=None, + ): """Update details for the account 
matching C{uid}.""" uid = self._provider.get_uid(username) logging.info("Updating data for user %s (UID %d).", username, uid) @@ -84,16 +120,21 @@ self._set_primary_group(username, primary_group_name) command = ["chfn"] - for option, value in [("-r", location), ("-f", name), - ("-w", work_number), ("-h", home_number)]: + for option, value in [ + ("-r", location), + ("-f", name), + ("-w", work_number), + ("-h", home_number), + ]: if value is not None: command += [option, value] if len(command) > 1: result, output = self.call_popen(command + [username]) if result != 0: - raise UserManagementError("Error setting details for user " - "%s.\n%s" % (username, output)) + raise UserManagementError( + "Error setting details for user " f"{username}.\n{output}", + ) return output def lock_user(self, username): @@ -104,8 +145,9 @@ logging.info("Locking out user %s (UID %d).", username, uid) result, output = self.call_popen(["usermod", "-L", username]) if result != 0: - raise UserManagementError("Error locking user %s.\n%s" % - (username, output)) + raise UserManagementError( + f"Error locking user {username}.\n{output}", + ) def unlock_user(self, username): """Unlock the account matching C{username}.""" @@ -114,8 +156,9 @@ result, output = self.call_popen(["usermod", "-U", username]) if result != 0: - raise UserManagementError("Error unlocking user %s.\n%s" - % (username, output)) + raise UserManagementError( + f"Error unlocking user {username}.\n{output}", + ) return output def remove_user(self, username, delete_home=False): @@ -123,17 +166,26 @@ uid = self._provider.get_uid(username) command = ["deluser", username] if delete_home: - logging.info("Removing user %s (UID %d) and deleting their home " - "directory.", username, uid) + logging.info( + "Removing user %s (UID %d) and deleting their home " + "directory.", + username, + uid, + ) command.append("--remove-home") else: - logging.info("Removing user %s (UID %d) without deleting their " - "home directory.", username, uid) + logging.info( + "Removing user %s (UID %d) without deleting their " + "home directory.", + username, + uid, + ) result, output = self.call_popen(command) if result != 0: - raise UserManagementError("Error removing user %s (UID %d).\n%s" - % (username, uid, output)) + raise UserManagementError( + f"Error removing user {username} (UID {uid:d}).\n{output}", + ) return output def add_group(self, groupname): @@ -141,21 +193,27 @@ logging.info("Adding group %s.", groupname) result, output = self.call_popen(["addgroup", groupname]) if result != 0: - raise UserManagementError("Error adding group %s.\n%s" % - (groupname, output)) + raise UserManagementError( + f"Error adding group {groupname}.\n{output}", + ) return output def set_group_details(self, groupname, new_name): """Update details for the group matching C{gid}.""" gid = self._provider.get_gid(groupname) - logging.info("Renaming group %s (GID %d) to %s.", - groupname, gid, new_name) + logging.info( + "Renaming group %s (GID %d) to %s.", + groupname, + gid, + new_name, + ) command = ["groupmod", "-n", new_name, groupname] result, output = self.call_popen(command) if result != 0: - raise UserManagementError("Error renaming group %s (GID %d) to " - "%s.\n%s" % (groupname, gid, new_name, - output)) + raise UserManagementError( + f"Error renaming group {groupname} (GID {gid:d}) to " + f"{new_name}.\n{output}", + ) return output def add_group_member(self, username, groupname): @@ -165,14 +223,21 @@ """ uid = self._provider.get_uid(username) gid = self._provider.get_gid(groupname) - 
logging.info("Adding user %s (UID %d) to group %s (GID %d).", - username, uid, groupname, gid) - result, output = self.call_popen(["gpasswd", "-a", username, - groupname]) - if result != 0: - raise UserManagementError("Error adding user %s (UID %d) to " - "group %s (GID %d).\n%s" % - (username, uid, groupname, gid, output)) + logging.info( + "Adding user %s (UID %d) to group %s (GID %d).", + username, + uid, + groupname, + gid, + ) + result, output = self.call_popen( + ["gpasswd", "-a", username, groupname], + ) + if result != 0: + raise UserManagementError( + f"Error adding user {username} (UID {uid:d}) to " + f"group {groupname} (GID {gid:d}).\n{output}", + ) return output def remove_group_member(self, username, groupname): @@ -182,15 +247,21 @@ """ uid = self._provider.get_uid(username) gid = self._provider.get_gid(groupname) - logging.info("Removing user %s (UID %d) from group %s (GID %d).", - username, uid, groupname, gid) - result, output = self.call_popen(["gpasswd", "-d", username, - groupname]) - if result != 0: - raise UserManagementError("Error removing user %s (UID %d) " - "from group %s (GID (%d).\n%s" - % (username, uid, groupname, - gid, output)) + logging.info( + "Removing user %s (UID %d) from group %s (GID %d).", + username, + uid, + groupname, + gid, + ) + result, output = self.call_popen( + ["gpasswd", "-d", username, groupname], + ) + if result != 0: + raise UserManagementError( + f"Error removing user {username} (UID {uid:d}) " + f"from group {groupname} (GID {gid:d}).\n{output}", + ) return output def remove_group(self, groupname): @@ -199,14 +270,17 @@ logging.info("Removing group %s (GID %d).", groupname, gid) result, output = self.call_popen(["groupdel", groupname]) if result != 0: - raise UserManagementError("Error removing group %s (GID %d).\n%s" % - (groupname, gid, output)) + raise UserManagementError( + f"Error removing group {groupname} (GID {gid:d}).\n{output}", + ) return output def call_popen(self, args): - popen = self._provider.popen(args, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + popen = self._provider.popen( + args, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) output = popen.stdout.read() result = popen.wait() return result, output diff -Nru landscape-client-23.02/landscape/client/user/provider.py landscape-client-23.08/landscape/client/user/provider.py --- landscape-client-23.02/landscape/client/user/provider.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/user/provider.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,8 @@ -from grp import struct_group -from pwd import struct_passwd import csv import logging import subprocess +from grp import struct_group +from pwd import struct_passwd from landscape.lib.compat import _PY3 @@ -19,7 +19,7 @@ """Raised when a group couldn't be found by gid/groupname.""" -class UserProviderBase(object): +class UserProviderBase: """This is a base class for user Providers.""" def __init__(self, locked_users=None): @@ -47,11 +47,18 @@ gecos_data.append(None) name, location, work_phone, home_phone = tuple(gecos_data) enabled = user.pw_name not in self.locked_users - users.append({"username": user.pw_name, "name": name, - "uid": user.pw_uid, "enabled": enabled, - "location": location, "work-phone": work_phone, - "home-phone": home_phone, - "primary-gid": user.pw_gid}) + users.append( + { + "username": user.pw_name, + "name": name, + "uid": user.pw_uid, + "enabled": enabled, + "location": location, + "work-phone": work_phone, + "home-phone": home_phone,
"primary-gid": user.pw_gid, + }, + ) found_usernames.add(user.pw_name) return users @@ -61,7 +68,7 @@ Each group is represented as a dict with the keys: C{name}, C{gid} and C{members}. """ - user_names = set([x["username"] for x in self.get_users()]) + user_names = {x["username"] for x in self.get_users()} groups = [] found_groupnames = set() for group in self.get_group_data(): @@ -70,8 +77,13 @@ if group.gr_name in found_groupnames: continue member_names = user_names.intersection(group.gr_mem) - groups.append({"name": group.gr_name, "gid": group.gr_gid, - "members": sorted(list(member_names))}) + groups.append( + { + "name": group.gr_name, + "gid": group.gr_gid, + "members": sorted(list(member_names)), + }, + ) found_groupnames.add(group.gr_name) return groups @@ -84,7 +96,7 @@ for data in self.get_users(): if data["username"] == username: return data["uid"] - raise UserNotFoundError("UID not found for user %s." % username) + raise UserNotFoundError(f"UID not found for user {username}.") def get_gid(self, groupname): """Returns the GID for C{groupname}. @@ -95,20 +107,31 @@ for data in self.get_groups(): if data["name"] == groupname: return data["gid"] - raise GroupNotFoundError("Group not found for group %s." % groupname) + raise GroupNotFoundError(f"Group not found for group {groupname}.") class UserProvider(UserProviderBase): popen = subprocess.Popen - passwd_fields = ["username", "passwd", "uid", "primary-gid", "gecos", - "home", "shell"] + passwd_fields = [ + "username", + "passwd", + "uid", + "primary-gid", + "gecos", + "home", + "shell", + ] group_fields = ["name", "passwd", "gid", "members"] - def __init__(self, locked_users=[], passwd_file="/etc/passwd", - group_file="/etc/group"): - super(UserProvider, self).__init__(locked_users) + def __init__( + self, + locked_users=[], + passwd_file="/etc/passwd", + group_file="/etc/group", + ): + super().__init__(locked_users) self._passwd_file = passwd_file self._group_file = group_file @@ -125,31 +148,44 @@ # explicitly indicate the encoding as we cannot rely on the system # default encoding. if _PY3: - open_params = dict(encoding="utf-8", errors='replace') + open_params = dict(encoding="utf-8", errors="replace") else: open_params = dict() with open(self._passwd_file, "r", **open_params) as passwd_file: reader = csv.DictReader( - passwd_file, fieldnames=self.passwd_fields, delimiter=":", - quoting=csv.QUOTE_NONE) + passwd_file, + fieldnames=self.passwd_fields, + delimiter=":", + quoting=csv.QUOTE_NONE, + ) current_line = 0 for row in reader: current_line += 1 # This skips the NIS user marker in the passwd file. - if (row["username"].startswith("+") or - row["username"].startswith("-")): + if row["username"].startswith("+") or row[ + "username" + ].startswith("-"): continue gecos = row["gecos"] if not _PY3 and gecos is not None: gecos = gecos.decode("utf-8", "replace") try: - user_data.append((row["username"], row["passwd"], - int(row["uid"]), int(row["primary-gid"]), - gecos, row["home"], row["shell"])) + user_data.append( + ( + row["username"], + row["passwd"], + int(row["uid"]), + int(row["primary-gid"]), + gecos, + row["home"], + row["shell"], + ), + ) except (ValueError, TypeError): logging.warn( - "passwd file %s is incorrectly formatted: line %d." 
- % (self._passwd_file, current_line)) + f"passwd file {self._passwd_file} is incorrectly " + f"formatted: line {current_line:d}.", + ) return user_data def get_group_data(self): @@ -160,19 +196,31 @@ """ group_data = [] group_file = open(self._group_file, "r") - reader = csv.DictReader(group_file, fieldnames=self.group_fields, - delimiter=":", quoting=csv.QUOTE_NONE) + reader = csv.DictReader( + group_file, + fieldnames=self.group_fields, + delimiter=":", + quoting=csv.QUOTE_NONE, + ) current_line = 0 for row in reader: current_line += 1 # Skip if we find the NIS marker - if (row["name"].startswith("+") or row["name"].startswith("-")): + if row["name"].startswith("+") or row["name"].startswith("-"): continue try: - group_data.append((row["name"], row["passwd"], int(row["gid"]), - row["members"].split(","))) + group_data.append( + ( + row["name"], + row["passwd"], + int(row["gid"]), + row["members"].split(","), + ), + ) except (AttributeError, ValueError): - logging.warn("group file %s is incorrectly formatted: " - "line %d." % (self._group_file, current_line)) + logging.warn( + f"group file {self._group_file} is incorrectly formatted: " + f"line {current_line:d}.", + ) group_file.close() return group_data diff -Nru landscape-client-23.02/landscape/client/user/tests/helpers.py landscape-client-23.08/landscape/client/user/tests/helpers.py --- landscape-client-23.02/landscape/client/user/tests/helpers.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/user/tests/helpers.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,11 +1,11 @@ -from twisted.python.compat import iteritems, itervalues +from twisted.python.compat import iteritems +from twisted.python.compat import itervalues from landscape.client.user.management import UserManagementError from landscape.client.user.provider import UserProviderBase -class FakeUserManagement(object): - +class FakeUserManagement: def __init__(self, provider=None): self.shadow_file = getattr(provider, "shadow_file", None) self.provider = provider @@ -18,16 +18,25 @@ self._groups[data["name"]] = data def _make_fake_shadow_file(self, locked_users, unlocked_users): - entry = "%s:%s:13348:0:99999:7:::\n" + entry = "{}:{}:13348:0:99999:7:::\n" shadow_file = open(self.shadow_file, "w") for user in locked_users: - shadow_file.write(entry % (user, "!")) + shadow_file.write(entry.format(user, "!")) for user in unlocked_users: - shadow_file.write(entry % (user, "qweqweqeqweqw")) + shadow_file.write(entry.format(user, "qweqweqeqweqw")) shadow_file.close() - def add_user(self, username, name, password, require_password_reset, - primary_group_name, location, work_phone, home_phone): + def add_user( + self, + username, + name, + password, + require_password_reset, + primary_group_name, + location, + work_phone, + home_phone, + ): try: uid = 1000 if self._users: @@ -36,15 +45,31 @@ primary_gid = self.get_gid(primary_group_name) else: primary_gid = uid - self._users[uid] = {"username": username, "name": name, - "uid": uid, "enabled": True, - "location": location, "work-phone": work_phone, - "home-phone": home_phone, - "primary-gid": primary_gid} - gecos_string = "%s,%s,%s,%s" % (name, location or "", - work_phone or "", home_phone or "") - userdata = (username, "x", uid, primary_gid, gecos_string, - "/bin/sh", "/home/user") + self._users[uid] = { + "username": username, + "name": name, + "uid": uid, + "enabled": True, + "location": location, + "work-phone": work_phone, + "home-phone": home_phone, + "primary-gid": primary_gid, + } + gecos_string = 
"{},{},{},{}".format( + name, + location or "", + work_phone or "", + home_phone or "", + ) + userdata = ( + username, + "x", + uid, + primary_gid, + gecos_string, + "/bin/sh", + "/home/user", + ) self.provider.users.append(userdata) except KeyError: raise UserManagementError("add_user failed") @@ -81,25 +106,38 @@ self.provider.users = remaining_users return "remove_user succeeded" - def set_user_details(self, username, password=None, name=None, - location=None, work_number=None, home_number=None, - primary_group_name=None): + def set_user_details( + self, + username, + password=None, + name=None, + location=None, + work_number=None, + home_number=None, + primary_group_name=None, + ): data = self._users.setdefault(username, {}) - for key, value in [("name", name), - ("location", location), - ("work-phone", work_number), - ("home-phone", home_number), - ]: + for key, value in [ + ("name", name), + ("location", location), + ("work-phone", work_number), + ("home-phone", home_number), + ]: if value: data[key] = value if primary_group_name: data["primary-gid"] = self.get_gid(primary_group_name) else: data["primary-gid"] = None - userdata = (username, "x", data["uid"], data["primary-gid"], - "%s,%s,%s,%s," % (name, location, work_number, - home_number), - "/bin/sh", "/home/user") + userdata = ( + username, + "x", + data["uid"], + data["primary-gid"], + f"{name},{location},{work_number},{home_number},", + "/bin/sh", + "/home/user", + ) self.provider.users = [userdata] return "set_user_details succeeded" @@ -107,7 +145,7 @@ try: return self._groups[name]["gid"] except KeyError: - raise UserManagementError("Group %s wasn't found." % name) + raise UserManagementError(f"Group {name} wasn't found.") def add_group(self, name): gid = 1000 @@ -154,15 +192,20 @@ class FakeUserProvider(UserProviderBase): - - def __init__(self, users=None, groups=None, popen=None, shadow_file=None, - locked_users=None): + def __init__( + self, + users=None, + groups=None, + popen=None, + shadow_file=None, + locked_users=None, + ): self.users = users self.groups = groups if popen: self.popen = popen self.shadow_file = shadow_file - super(FakeUserProvider, self).__init__(locked_users=locked_users) + super().__init__(locked_users=locked_users) def get_user_data(self, system=False): if self.users is None: @@ -175,7 +218,7 @@ return self.groups -class FakeUserInfo(object): +class FakeUserInfo: """Implements enough functionality to work for Changes tests.""" persist_name = "users" diff -Nru landscape-client-23.02/landscape/client/user/tests/test_changes.py landscape-client-23.08/landscape/client/user/tests/test_changes.py --- landscape-client-23.02/landscape/client/user/tests/test_changes.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/user/tests/test_changes.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,9 @@ -from landscape.lib.persist import Persist +from landscape.client.tests.helpers import LandscapeTest +from landscape.client.tests.helpers import MonitorHelper from landscape.client.user.changes import UserChanges -from landscape.client.user.tests.helpers import FakeUserInfo, FakeUserProvider - -from landscape.client.tests.helpers import LandscapeTest, MonitorHelper +from landscape.client.user.tests.helpers import FakeUserInfo +from landscape.client.user.tests.helpers import FakeUserProvider +from landscape.lib.persist import Persist class UserChangesTest(LandscapeTest): @@ -10,13 +11,15 @@ helpers = [MonitorHelper] def setUp(self): - super(UserChangesTest, self).setUp() + 
super().setUp() self.persist = Persist() - self.shadow_file = self.makeFile("""\ + self.shadow_file = self.makeFile( + """\ jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7::: psmith:!:13348:0:99999:7::: sbarnes:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7::: -""") +""", + ) def test_no_existing_snapshot(self): """ @@ -30,17 +33,25 @@ FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) - self.assertEqual(changes.create_diff(), - {"create-users": [{"username": "jdoe", - "home-phone": None, - "name": u"JD", - "enabled": True, - "location": None, - "work-phone": None, - "uid": 1000, - "primary-gid": 1000}], - "create-groups": [{"gid": 1000, "name": "webdev"}], - "create-group-members": {"webdev": ["jdoe"]}}) + self.assertEqual( + changes.create_diff(), + { + "create-users": [ + { + "username": "jdoe", + "home-phone": None, + "name": "JD", + "enabled": True, + "location": None, + "work-phone": None, + "uid": 1000, + "primary-gid": 1000, + }, + ], + "create-groups": [{"gid": 1000, "name": "webdev"}], + "create-group-members": {"webdev": ["jdoe"]}, + }, + ) def test_snapshot(self): """ @@ -118,15 +129,23 @@ changes.create_diff() changes.snapshot() users.append(("bo", "x", 1001, 1001, "Bo,,,,", "/home/bo", "/bin/sh")) - self.assertEqual(changes.create_diff(), - {"create-users": [{"username": "bo", - "home-phone": None, - "name": u"Bo", - "enabled": True, - "location": None, - "work-phone": None, - "uid": 1001, - "primary-gid": 1001}]}) + self.assertEqual( + changes.create_diff(), + { + "create-users": [ + { + "username": "bo", + "home-phone": None, + "name": "Bo", + "enabled": True, + "location": None, + "work-phone": None, + "uid": 1001, + "primary-gid": 1001, + }, + ], + }, + ) def test_update_user(self): """ @@ -139,25 +158,42 @@ changes = UserChanges(self.persist, provider) changes.create_diff() changes.snapshot() - users[0] = ("jdoe", "x", 1000, 1001, "John Doe,Here,789WORK,321HOME", - "/home/john", "/bin/zsh") - self.assertEqual(changes.create_diff(), - {"update-users": [{"username": "jdoe", - "home-phone": u"321HOME", - "name": u"John Doe", - "enabled": True, - "location": "Here", - "work-phone": "789WORK", - "uid": 1000, - "primary-gid": 1001}]}) + users[0] = ( + "jdoe", + "x", + 1000, + 1001, + "John Doe,Here,789WORK,321HOME", + "/home/john", + "/bin/zsh", + ) + self.assertEqual( + changes.create_diff(), + { + "update-users": [ + { + "username": "jdoe", + "home-phone": "321HOME", + "name": "John Doe", + "enabled": True, + "location": "Here", + "work-phone": "789WORK", + "uid": 1000, + "primary-gid": 1001, + }, + ], + }, + ) def test_delete_user(self): """ L{UserChanges.create_diff} should report users removed externally with C{deluser} or similar tools. 
""" - users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), - ("bo", "x", 1001, 1001, "Bo,,,,", "/home/bo", "/bin/sh")] + users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ("bo", "x", 1001, 1001, "Bo,,,,", "/home/bo", "/bin/sh"), + ] provider = FakeUserProvider(users=users) FakeUserInfo(provider=provider) @@ -181,8 +217,10 @@ changes.create_diff() changes.snapshot() groups.append(("bizdev", "x", 60, [])) - self.assertEqual(changes.create_diff(), - {"create-groups": [{"gid": 60, "name": "bizdev"}]}) + self.assertEqual( + changes.create_diff(), + {"create-groups": [{"gid": 60, "name": "bizdev"}]}, + ) def test_add_group_with_members(self): """ @@ -198,9 +236,13 @@ changes.create_diff() changes.snapshot() groups.append(("bizdev", "x", 60, ["jdoe"])) - self.assertEqual(changes.create_diff(), - {"create-groups": [{"gid": 60, "name": "bizdev"}], - "create-group-members": {"bizdev": ["jdoe"]}}) + self.assertEqual( + changes.create_diff(), + { + "create-groups": [{"gid": 60, "name": "bizdev"}], + "create-group-members": {"bizdev": ["jdoe"]}, + }, + ) def test_update_group(self): """ @@ -215,16 +257,20 @@ changes.create_diff() changes.snapshot() groups[0] = ("webdev", "x", 1001, []) - self.assertEqual(changes.create_diff(), - {"update-groups": [{"gid": 1001, "name": "webdev"}]}) + self.assertEqual( + changes.create_diff(), + {"update-groups": [{"gid": 1001, "name": "webdev"}]}, + ) def test_add_group_members(self): """ L{UserChanges.create_diff} should report new members added to groups externally with C{gpasswd} or similar tools. """ - users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), - ("bo", "x", 1001, 1001, "Bo,,,,", "/home/bo", "/bin/sh")] + users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ("bo", "x", 1001, 1001, "Bo,,,,", "/home/bo", "/bin/sh"), + ] groups = [("webdev", "x", 50, ["jdoe"])] provider = FakeUserProvider(users=users, groups=groups) FakeUserInfo(provider=provider) @@ -233,8 +279,10 @@ changes.create_diff() changes.snapshot() groups[0] = ("webdev", "x", 50, ["jdoe", "bo"]) - self.assertEqual(changes.create_diff(), - {"create-group-members": {"webdev": ["bo"]}}) + self.assertEqual( + changes.create_diff(), + {"create-group-members": {"webdev": ["bo"]}}, + ) def test_delete_group_members(self): """ @@ -250,8 +298,10 @@ changes.create_diff() changes.snapshot() groups[0] = ("webdev", "x", 50, []) - self.assertEqual(changes.create_diff(), - {"delete-group-members": {"webdev": ["jdoe"]}}) + self.assertEqual( + changes.create_diff(), + {"delete-group-members": {"webdev": ["jdoe"]}}, + ) def test_delete_group(self): """ @@ -274,10 +324,11 @@ L{UserChanges.create_diff} should be able to report multiple kinds of changes at the same time. 
""" - users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), - ("bo", "x", 1001, 1001, "Bo,,,,", "/home/bo", "/bin/sh")] - groups = [("webdev", "x", 50, ["jdoe"]), - ("bizdev", "x", 60, ["bo"])] + users = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), + ("bo", "x", 1001, 1001, "Bo,,,,", "/home/bo", "/bin/sh"), + ] + groups = [("webdev", "x", 50, ["jdoe"]), ("bizdev", "x", 60, ["bo"])] provider = FakeUserProvider(users=users, groups=groups) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) @@ -293,9 +344,16 @@ self.assertCountEqual( changes.create_diff(), - {"create-groups": [{"gid": 50, "name": "developers"}, - {"gid": 70, "name": "sales"}], - "delete-users": ["jdoe"], - "delete-groups": ["webdev"], - "create-group-members": {"developers": ["bo"], - "sales": ["bo"]}}) + { + "create-groups": [ + {"gid": 50, "name": "developers"}, + {"gid": 70, "name": "sales"}, + ], + "delete-users": ["jdoe"], + "delete-groups": ["webdev"], + "create-group-members": { + "developers": ["bo"], + "sales": ["bo"], + }, + }, + ) diff -Nru landscape-client-23.02/landscape/client/user/tests/test_management.py landscape-client-23.08/landscape/client/user/tests/test_management.py --- landscape-client-23.02/landscape/client/user/tests/test_management.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/user/tests/test_management.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,34 +1,51 @@ -from landscape.lib.testing import MockPopen -from landscape.client.user.management import ( - UserManagement, UserManagementError) -from landscape.client.user.tests.helpers import FakeUserProvider -from landscape.client.user.provider import ( - UserNotFoundError, GroupNotFoundError) from landscape.client.tests.helpers import LandscapeTest +from landscape.client.user.management import UserManagement +from landscape.client.user.management import UserManagementError +from landscape.client.user.provider import GroupNotFoundError +from landscape.client.user.provider import UserNotFoundError +from landscape.client.user.tests.helpers import FakeUserProvider +from landscape.lib.testing import MockPopen class UserWriteTest(LandscapeTest): - def setUp(self): LandscapeTest.setUp(self) - self.shadow_file = self.makeFile("""\ + self.shadow_file = self.makeFile( + """\ jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7::: psmith:!:13348:0:99999:7::: sbarnes:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7::: -""") +""", + ) def test_add_user(self): """L{UserManagement.add_user} should use C{adduser} to add users.""" groups = [("users", "x", 1001, [])] provider = FakeUserProvider(groups=groups, popen=MockPopen("")) management = UserManagement(provider=provider) - management.add_user("jdoe", "John Doe", "password", False, "users", - "Room 101", "+123456", None) + management.add_user( + "jdoe", + "John Doe", + "password", + False, + "users", + "Room 101", + "+123456", + None, + ) self.assertEqual(len(provider.popen.popen_inputs), 2) - self.assertEqual(provider.popen.popen_inputs[0], - ["adduser", "jdoe", "--disabled-password", - "--gecos", "John Doe,Room 101,+123456,", - "--gid", "1001"]) + self.assertEqual( + provider.popen.popen_inputs[0], + [ + "adduser", + "jdoe", + "--disabled-password", + "--gecos", + "John Doe,Room 101,+123456,", + "--gid", + "1001", + ], + ) chpasswd = provider.popen.popen_inputs[1] self.assertEqual(len(chpasswd), 1, chpasswd) @@ -41,9 +58,18 @@ """ provider = FakeUserProvider(popen=MockPopen("", return_codes=[1, 0])) management 
= UserManagement(provider=provider) - self.assertRaises(UserManagementError, management.add_user, - "jdoe", u"John Doe", "password", False, None, None, - None, None) + self.assertRaises( + UserManagementError, + management.add_user, + "jdoe", + "John Doe", + "password", + False, + None, + None, + None, + None, + ) def test_change_password_error(self): """ @@ -54,10 +80,20 @@ provider.popen.err_out = b"PAM is unhappy" management = UserManagement(provider=provider) with self.assertRaises(UserManagementError) as e: - management.add_user("jdoe", u"John Doe", "password", False, None, - None, None, None) + management.add_user( + "jdoe", + "John Doe", + "password", + False, + None, + None, + None, + None, + ) expected = "Error setting password for user {}.\n {}".format( - b"jdoe", b"PAM is unhappy") + b"jdoe", + b"PAM is unhappy", + ) self.assertEqual(expected, str(e.exception)) def test_expire_password_error(self): @@ -66,11 +102,21 @@ C{passwd} fails. """ provider = FakeUserProvider( - popen=MockPopen("", return_codes=[0, 0, 1])) + popen=MockPopen("", return_codes=[0, 0, 1]), + ) management = UserManagement(provider=provider) - self.assertRaises(UserManagementError, management.add_user, - "jdoe", u"John Doe", "password", True, None, None, - None, None) + self.assertRaises( + UserManagementError, + management.add_user, + "jdoe", + "John Doe", + "password", + True, + None, + None, + None, + None, + ) def test_set_password(self): """ @@ -78,8 +124,11 @@ a user's password. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=MockPopen("no output")) + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=MockPopen("no output"), + ) management = UserManagement(provider=provider) management.set_user_details("jdoe", password="password") @@ -92,9 +141,11 @@ users. """ data = [("root", "x", 0, 0, ",,,,", "/home/root", "/bin/zsh")] - provider = FakeUserProvider(users=data, - shadow_file=self.shadow_file, - popen=MockPopen("no output")) + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=MockPopen("no output"), + ) management = UserManagement(provider=provider) management.set_user_details("root", password="password") self.assertEqual(len(provider.popen.popen_inputs), 1) @@ -107,10 +158,13 @@ non-ASCII-encodable still probably won't work). """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=MockPopen("no output")) + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=MockPopen("no output"), + ) management = UserManagement(provider=provider) - management.set_user_details("jdoe", password=u"password") + management.set_user_details("jdoe", password="password") self.assertEqual(len(provider.popen.popen_inputs), 1) self.assertEqual(b"jdoe:password", provider.popen.received_input) @@ -121,14 +175,19 @@ change a user's name. 
""" data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=MockPopen("no output")) + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=MockPopen("no output"), + ) management = UserManagement(provider=provider) management.set_user_details("jdoe", name="JD") self.assertEqual(len(provider.popen.popen_inputs), 1) - self.assertEqual(provider.popen.popen_inputs, - [["chfn", "-f", "JD", "jdoe"]]) + self.assertEqual( + provider.popen.popen_inputs, + [["chfn", "-f", "JD", "jdoe"]], + ) def test_set_location(self): """ @@ -136,44 +195,77 @@ change a user's location. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=MockPopen("no output")) + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=MockPopen("no output"), + ) management = UserManagement(provider=provider) management.set_user_details("jdoe", location="Everywhere") self.assertEqual(len(provider.popen.popen_inputs), 1) - self.assertEqual(provider.popen.popen_inputs, - [["chfn", "-r", "Everywhere", "jdoe"]]) + self.assertEqual( + provider.popen.popen_inputs, + [["chfn", "-r", "Everywhere", "jdoe"]], + ) def test_clear_user_location(self): """ L{UserManagement.set_user_details} should use C{chfn} to change a user's location. """ - data = [("jdoe", "x", 1000, 1000, "JD,Room 101,,,", "/home/jdoe", - "/bin/zsh")] - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=MockPopen("no output")) + data = [ + ( + "jdoe", + "x", + 1000, + 1000, + "JD,Room 101,,,", + "/home/jdoe", + "/bin/zsh", + ), + ] + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=MockPopen("no output"), + ) management = UserManagement(provider=provider) management.set_user_details("jdoe", location="") self.assertEqual(len(provider.popen.popen_inputs), 1) - self.assertEqual(provider.popen.popen_inputs, - [["chfn", "-r", "", "jdoe"]]) + self.assertEqual( + provider.popen.popen_inputs, + [["chfn", "-r", "", "jdoe"]], + ) def test_clear_telephone_numbers(self): """ L{UserManagement.set_user_details} should use C{chfn} to change a user's telephone numbers. """ - data = [("jdoe", "x", 1000, 1000, "JD,,+123456,+123456", "/home/jdoe", - "/bin/zsh")] - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=MockPopen("no output")) + data = [ + ( + "jdoe", + "x", + 1000, + 1000, + "JD,,+123456,+123456", + "/home/jdoe", + "/bin/zsh", + ), + ] + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=MockPopen("no output"), + ) management = UserManagement(provider=provider) management.set_user_details("jdoe", home_number="", work_number="") self.assertEqual(len(provider.popen.popen_inputs), 1) - self.assertEqual(provider.popen.popen_inputs, - [["chfn", "-w", "", "-h", "", "jdoe"]]) + self.assertEqual( + provider.popen.popen_inputs, + [["chfn", "-w", "", "-h", "", "jdoe"]], + ) def test_set_user_details_fails(self): """ @@ -181,11 +273,18 @@ L{EditUserError} if C{chfn} fails. 
""" data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=MockPopen("", return_codes=[1])) + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=MockPopen("", return_codes=[1]), + ) management = UserManagement(provider=provider) - self.assertRaises(UserNotFoundError, management.set_user_details, 1000, - name="John Doe") + self.assertRaises( + UserNotFoundError, + management.set_user_details, + 1000, + name="John Doe", + ) def test_contact_details_in_general(self): """ @@ -193,20 +292,38 @@ change a user's contact details. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=MockPopen("no output")) - management = UserManagement(provider=provider) - location = u"Everywhere" - work_number = u"1-800-123-4567" - home_number = u"764-4321" - management.set_user_details("jdoe", location=location, - work_number=work_number, - home_number=home_number) + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=MockPopen("no output"), + ) + management = UserManagement(provider=provider) + location = "Everywhere" + work_number = "1-800-123-4567" + home_number = "764-4321" + management.set_user_details( + "jdoe", + location=location, + work_number=work_number, + home_number=home_number, + ) self.assertEqual(len(provider.popen.popen_inputs), 1) - self.assertEqual(provider.popen.popen_inputs, - [["chfn", "-r", location, "-w", work_number, - "-h", home_number, "jdoe"]]) + self.assertEqual( + provider.popen.popen_inputs, + [ + [ + "chfn", + "-r", + location, + "-w", + work_number, + "-h", + home_number, + "jdoe", + ], + ], + ) def test_set_user_details_with_unknown_username(self): """ @@ -215,8 +332,12 @@ """ provider = FakeUserProvider(popen=MockPopen("")) management = UserManagement(provider=provider) - self.assertRaises(UserNotFoundError, management.set_user_details, - "kevin", name=u"John Doe") + self.assertRaises( + UserNotFoundError, + management.set_user_details, + "kevin", + name="John Doe", + ) def test_set_primary_group(self): """ @@ -225,15 +346,20 @@ """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("users", "x", 1001, [])] - provider = FakeUserProvider(users=data, groups=groups, - shadow_file=self.shadow_file, - popen=MockPopen("no output")) + provider = FakeUserProvider( + users=data, + groups=groups, + shadow_file=self.shadow_file, + popen=MockPopen("no output"), + ) management = UserManagement(provider=provider) management.set_user_details("jdoe", primary_group_name="users") - self.assertEqual(provider.popen.popen_inputs, - [["usermod", "-g", "1001", "jdoe"]]) + self.assertEqual( + provider.popen.popen_inputs, + [["usermod", "-g", "1001", "jdoe"]], + ) def test_set_primary_group_unknown_group(self): """ @@ -243,24 +369,34 @@ """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("staff", "x", 1001, [])] - provider = FakeUserProvider(users=data, groups=groups, - shadow_file=self.shadow_file, - popen=MockPopen("group id 1002 unknown", - return_codes=[1]), - ) + provider = FakeUserProvider( + users=data, + groups=groups, + shadow_file=self.shadow_file, + popen=MockPopen("group id 1002 unknown", return_codes=[1]), + ) management = UserManagement(provider=provider) - self.assertRaises(GroupNotFoundError, management.set_user_details, - "jdoe", primary_group_name="unknown") + 
self.assertRaises( + GroupNotFoundError, + management.set_user_details, + "jdoe", + primary_group_name="unknown", + ) def test_lock_user(self): """L{UserManagement.lock_user} should use C{usermod} to lock users.""" data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=MockPopen("no output")) + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=MockPopen("no output"), + ) management = UserManagement(provider=provider) management.lock_user("jdoe") - self.assertEqual(provider.popen.popen_inputs, - [["usermod", "-L", "jdoe"]]) + self.assertEqual( + provider.popen.popen_inputs, + [["usermod", "-L", "jdoe"]], + ) def test_lock_user_fails(self): """ @@ -268,8 +404,11 @@ a C{usermod} fails. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=MockPopen("", [1])) + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=MockPopen("", [1]), + ) management = UserManagement(provider=provider) self.assertRaises(UserNotFoundError, management.lock_user, 1000) @@ -288,12 +427,17 @@ users. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=MockPopen("no output")) + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=MockPopen("no output"), + ) management = UserManagement(provider=provider) management.unlock_user("jdoe") - self.assertEqual(provider.popen.popen_inputs, - [["usermod", "-U", "jdoe"]]) + self.assertEqual( + provider.popen.popen_inputs, + [["usermod", "-U", "jdoe"]], + ) def test_unlock_user_fails(self): """ @@ -301,8 +445,11 @@ L{UserManagementError} if a C{usermod} fails. 
""" data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=MockPopen("", [1])) + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=MockPopen("", [1]), + ) management = UserManagement(provider=provider) self.assertRaises(UserNotFoundError, management.unlock_user, 1000) @@ -322,8 +469,11 @@ """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] popen = MockPopen("Removing user `jdoe'...\r\ndone.") - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=popen) + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=popen, + ) management = UserManagement(provider=provider) management.remove_user("jdoe") self.assertEqual(popen.popen_inputs, [["deluser", "jdoe"]]) @@ -344,10 +494,16 @@ """ self.log_helper.ignore_errors(UserNotFoundError) data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] - popen = MockPopen("/usr/sbin/deluser: Only root may remove a user or " - "group from the system.", [1]) - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=popen) + popen = MockPopen( + "/usr/sbin/deluser: Only root may remove a user or " + "group from the system.", + [1], + ) + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=popen, + ) management = UserManagement(provider=provider) self.assertRaises(UserNotFoundError, management.remove_user, "smith") @@ -358,23 +514,29 @@ """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] popen = MockPopen("Removing user `jdoe`...\r\ndone.", [0]) - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - popen=popen) + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + popen=popen, + ) management = UserManagement(provider=provider) management.remove_user("jdoe", delete_home=True) - self.assertEqual(popen.popen_inputs, - [["deluser", "jdoe", "--remove-home"]]) + self.assertEqual( + popen.popen_inputs, + [["deluser", "jdoe", "--remove-home"]], + ) class GroupWriteTest(LandscapeTest): - def setUp(self): LandscapeTest.setUp(self) - self.shadow_file = self.makeFile("""\ + self.shadow_file = self.makeFile( + """\ jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7::: psmith:!:13348:0:99999:7::: sbarnes:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7::: -""") +""", + ) def test_add_group(self): """ @@ -384,8 +546,7 @@ provider = FakeUserProvider(popen=MockPopen("Result")) management = UserManagement(provider=provider) result = management.add_group("webdev") - self.assertEqual(provider.popen.popen_inputs, - [["addgroup", "webdev"]]) + self.assertEqual(provider.popen.popen_inputs, [["addgroup", "webdev"]]) self.assertEqual(result, "Result") def test_add_group_handles_errors(self): @@ -405,13 +566,18 @@ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 1001, [])] provider = FakeUserProvider( - users=users, shadow_file=self.shadow_file, - groups=groups, popen=MockPopen("no output")) + users=users, + shadow_file=self.shadow_file, + groups=groups, + popen=MockPopen("no output"), + ) management = UserManagement(provider=provider) management.set_group_details("bizdev", "sales") - self.assertEqual(provider.popen.popen_inputs, - [["groupmod", "-n", "sales", "bizdev"]]) + self.assertEqual( + provider.popen.popen_inputs, + [["groupmod", "-n", "sales", "bizdev"]], + ) def 
test_set_group_details_with_unknown_groupname(self): """ @@ -420,8 +586,12 @@ """ provider = FakeUserProvider(popen=MockPopen("")) management = UserManagement(provider=provider) - self.assertRaises(GroupNotFoundError, management.set_group_details, - "sales", u"newsales") + self.assertRaises( + GroupNotFoundError, + management.set_group_details, + "sales", + "newsales", + ) def test_set_group_details_fails(self): """ @@ -431,11 +601,19 @@ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 1001, [])] popen = MockPopen("groupmod: sales is not a unique name", [1]) - provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, - groups=groups, popen=popen) + provider = FakeUserProvider( + users=users, + shadow_file=self.shadow_file, + groups=groups, + popen=popen, + ) management = UserManagement(provider=provider) - self.assertRaises(UserManagementError, management.set_group_details, - "bizdev", u"sales") + self.assertRaises( + UserManagementError, + management.set_group_details, + "bizdev", + "sales", + ) def test_add_member(self): """ @@ -444,16 +622,19 @@ """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 1001, [])] - provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, - groups=groups, - popen=MockPopen("Removing user jdoe " - "from group bizdev"), - ) + provider = FakeUserProvider( + users=users, + shadow_file=self.shadow_file, + groups=groups, + popen=MockPopen("Removing user jdoe " "from group bizdev"), + ) management = UserManagement(provider=provider) output = management.add_group_member("jdoe", "bizdev") - self.assertEqual(provider.popen.popen_inputs, - [["gpasswd", "-a", "jdoe", "bizdev"]]) + self.assertEqual( + provider.popen.popen_inputs, + [["gpasswd", "-a", "jdoe", "bizdev"]], + ) self.assertEqual(output, "Removing user jdoe from group bizdev") def test_add_member_with_unknown_groupname(self): @@ -463,11 +644,18 @@ exist. 
""" users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] - provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, - popen=MockPopen("")) + provider = FakeUserProvider( + users=users, + shadow_file=self.shadow_file, + popen=MockPopen(""), + ) management = UserManagement(provider=provider) - self.assertRaises(GroupNotFoundError, management.add_group_member, - "jdoe", "bizdev") + self.assertRaises( + GroupNotFoundError, + management.add_group_member, + "jdoe", + "bizdev", + ) def test_add_member_with_unknown_username(self): """ @@ -478,8 +666,12 @@ groups = [("bizdev", "x", 1001, [])] provider = FakeUserProvider(groups=groups, popen=MockPopen("")) management = UserManagement(provider=provider) - self.assertRaises(UserNotFoundError, management.add_group_member, - "bizdev", "smith") + self.assertRaises( + UserNotFoundError, + management.add_group_member, + "bizdev", + "smith", + ) def test_add_member_failure(self): """ @@ -489,12 +681,19 @@ """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 1001, [])] - provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, - groups=groups, - popen=MockPopen("no output", [1])) + provider = FakeUserProvider( + users=users, + shadow_file=self.shadow_file, + groups=groups, + popen=MockPopen("no output", [1]), + ) management = UserManagement(provider=provider) - self.assertRaises(UserNotFoundError, management.add_group_member, - 1000, 1001) + self.assertRaises( + UserNotFoundError, + management.add_group_member, + 1000, + 1001, + ) def test_remove_member(self): """ @@ -504,15 +703,18 @@ """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 1001, [])] - provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, - groups=groups, - popen=MockPopen("Removing user jdoe " - "from group bizdev"), - ) + provider = FakeUserProvider( + users=users, + shadow_file=self.shadow_file, + groups=groups, + popen=MockPopen("Removing user jdoe " "from group bizdev"), + ) management = UserManagement(provider=provider) output = management.remove_group_member("jdoe", "bizdev") - self.assertEqual(provider.popen.popen_inputs, - [["gpasswd", "-d", "jdoe", "bizdev"]]) + self.assertEqual( + provider.popen.popen_inputs, + [["gpasswd", "-d", "jdoe", "bizdev"]], + ) self.assertEqual(output, "Removing user jdoe from group bizdev") def test_remove_member_with_unknown_groupname(self): @@ -522,11 +724,18 @@ doesn't exist. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] - provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, - popen=MockPopen("", return_codes=[2])) + provider = FakeUserProvider( + users=users, + shadow_file=self.shadow_file, + popen=MockPopen("", return_codes=[2]), + ) management = UserManagement(provider=provider) - self.assertRaises(GroupNotFoundError, management.remove_group_member, - "jdoe", "bizdev") + self.assertRaises( + GroupNotFoundError, + management.remove_group_member, + "jdoe", + "bizdev", + ) def test_remove_member_with_unknown_username(self): """ @@ -535,11 +744,17 @@ exist. 
""" groups = [("bizdev", "x", 1001, [])] - provider = FakeUserProvider(groups=groups, - popen=MockPopen("", return_codes=[4])) + provider = FakeUserProvider( + groups=groups, + popen=MockPopen("", return_codes=[4]), + ) management = UserManagement(provider=provider) - self.assertRaises(UserNotFoundError, management.remove_group_member, - "jdoe", "bizdev") + self.assertRaises( + UserNotFoundError, + management.remove_group_member, + "jdoe", + "bizdev", + ) def test_remove_member_failure(self): """ @@ -549,12 +764,19 @@ """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 1001, [])] - provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, - groups=groups, - popen=MockPopen("no output", [1])) + provider = FakeUserProvider( + users=users, + shadow_file=self.shadow_file, + groups=groups, + popen=MockPopen("no output", [1]), + ) management = UserManagement(provider=provider) - self.assertRaises(UserManagementError, - management.remove_group_member, "jdoe", "bizdev") + self.assertRaises( + UserManagementError, + management.remove_group_member, + "jdoe", + "bizdev", + ) def test_remove_group(self): """ @@ -564,12 +786,15 @@ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 50, [])] popen = MockPopen("Removing group `bizdev'...\r\ndone.") - provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, - groups=groups, popen=popen) + provider = FakeUserProvider( + users=users, + shadow_file=self.shadow_file, + groups=groups, + popen=popen, + ) management = UserManagement(provider=provider) management.remove_group("bizdev") - self.assertEqual(provider.popen.popen_inputs, - [["groupdel", "bizdev"]]) + self.assertEqual(provider.popen.popen_inputs, [["groupdel", "bizdev"]]) def test_remove_group_with_unknown_groupname(self): """ @@ -579,7 +804,10 @@ provider = FakeUserProvider(popen=MockPopen("")) management = UserManagement(provider=provider) self.assertRaises( - GroupNotFoundError, management.remove_group, "ubuntu") + GroupNotFoundError, + management.remove_group, + "ubuntu", + ) def test_remove_group_fails(self): """ @@ -588,10 +816,20 @@ """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 50, [])] - popen = MockPopen("/usr/sbin/deluser: Only root may remove a user or " - "group from the system.", [1]) - provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, - groups=groups, popen=popen) + popen = MockPopen( + "/usr/sbin/deluser: Only root may remove a user or " + "group from the system.", + [1], + ) + provider = FakeUserProvider( + users=users, + shadow_file=self.shadow_file, + groups=groups, + popen=popen, + ) management = UserManagement(provider=provider) self.assertRaises( - GroupNotFoundError, management.remove_group, "ubuntu") + GroupNotFoundError, + management.remove_group, + "ubuntu", + ) diff -Nru landscape-client-23.02/landscape/client/user/tests/test_provider.py landscape-client-23.08/landscape/client/user/tests/test_provider.py --- landscape-client-23.02/landscape/client/user/tests/test_provider.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/user/tests/test_provider.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,34 +1,39 @@ -import pwd import grp - -from landscape.client.user.provider import ( - UserProvider, UserNotFoundError, GroupNotFoundError) +import pwd from landscape.client.tests.helpers import LandscapeTest +from landscape.client.user.provider import 
GroupNotFoundError +from landscape.client.user.provider import UserNotFoundError +from landscape.client.user.provider import UserProvider from landscape.client.user.tests.helpers import FakeUserProvider class ProviderTest(LandscapeTest): - def setUp(self): LandscapeTest.setUp(self) - self.shadow_file = self.makeFile("""\ + self.shadow_file = self.makeFile( + """\ jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7::: psmith:!:13348:0:99999:7::: sbarnes:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7::: -""") +""", + ) - self.passwd_file = self.makeFile("""\ + self.passwd_file = self.makeFile( + """\ root:x:0:0:root:/root:/bin/bash haldaemon:x:107:116:Hardware abstraction layer,,,:/home/haldaemon:/bin/false kevin:x:1001:65534:Kevin,101,+44123123,+44123124:/home/kevin:/bin/bash -""") +""", + ) - self.group_file = self.makeFile("""\ + self.group_file = self.makeFile( + """\ root:x:0: cdrom:x:24:haldaemon,kevin kevin:x:1000: -""") +""", + ) def test_get_uid(self): """ @@ -45,11 +50,21 @@ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() - self.assertEqual(users, [{"username": "jdoe", "name": u"JD", - "uid": 1000, "enabled": True, - "location": None, "home-phone": None, - "work-phone": None, - "primary-gid": 1000}]) + self.assertEqual( + users, + [ + { + "username": "jdoe", + "name": "JD", + "uid": 1000, + "enabled": True, + "location": None, + "home-phone": None, + "work-phone": None, + "primary-gid": 1000, + }, + ], + ) def test_gecos_data(self): """ @@ -57,29 +72,65 @@ correctly parsed out of the GECOS field, and included in the users message. """ - data = [("jdoe", "x", 1000, 1000, "JD,Everywhere,7654321,123HOME,", - "/home/jdoe", "/bin/zsh")] + data = [ + ( + "jdoe", + "x", + 1000, + 1000, + "JD,Everywhere,7654321,123HOME,", + "/home/jdoe", + "/bin/zsh", + ), + ] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() - self.assertEqual(users, [{"username": "jdoe", "name": u"JD", - "uid": 1000, "enabled": True, - "location": u"Everywhere", - "home-phone": u"123HOME", - "work-phone": u"7654321", - "primary-gid": 1000}]) + self.assertEqual( + users, + [ + { + "username": "jdoe", + "name": "JD", + "uid": 1000, + "enabled": True, + "location": "Everywhere", + "home-phone": "123HOME", + "work-phone": "7654321", + "primary-gid": 1000, + }, + ], + ) def test_four_gecos_fields(self): """If a GECOS field only has four fields it should still work.""" - data = [("jdoe", "x", 1000, 1000, "JD,Everywhere,7654321,123HOME", - "/home/jdoe", "/bin/zsh")] + data = [ + ( + "jdoe", + "x", + 1000, + 1000, + "JD,Everywhere,7654321,123HOME", + "/home/jdoe", + "/bin/zsh", + ), + ] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() - self.assertEqual(users, [{"username": "jdoe", "name": u"JD", - "uid": 1000, "enabled": True, - "location": u"Everywhere", - "home-phone": u"123HOME", - "work-phone": u"7654321", - "primary-gid": 1000}]) + self.assertEqual( + users, + [ + { + "username": "jdoe", + "name": "JD", + "uid": 1000, + "enabled": True, + "location": "Everywhere", + "home-phone": "123HOME", + "work-phone": "7654321", + "primary-gid": 1000, + }, + ], + ) def test_old_school_gecos_data(self): """ @@ -87,29 +138,60 @@ will be written as a comment. The client must be resilient to this situation. 
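
        As the neighbouring tests show, a fully populated GECOS field
        follows the name,location,work-phone,home-phone comma
        convention, for example:

            "JD,Everywhere,7654321,123HOME"  parses into all four fields
            "John Doe"                       yields a name only

        and every field that is absent is reported as None.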
""" - data = [("jdoe", "x", 1000, 1000, "John Doe", - "/home/jdoe", "/bin/zsh")] + data = [ + ("jdoe", "x", 1000, 1000, "John Doe", "/home/jdoe", "/bin/zsh"), + ] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() - self.assertEqual(users, [{"username": "jdoe", "uid": 1000, - "enabled": True, "name": u"John Doe", - "location": None, "home-phone": None, - "work-phone": None, "primary-gid": 1000}]) + self.assertEqual( + users, + [ + { + "username": "jdoe", + "uid": 1000, + "enabled": True, + "name": "John Doe", + "location": None, + "home-phone": None, + "work-phone": None, + "primary-gid": 1000, + }, + ], + ) def test_weird_gecos_data(self): """ If GECOS data is malformed in a way that contains less than four fields, read as many as are available. """ - data = [("jdoe", "x", 1000, 1000, "John Doe,Everywhere", - "/home/jdoe", "/bin/zsh")] + data = [ + ( + "jdoe", + "x", + 1000, + 1000, + "John Doe,Everywhere", + "/home/jdoe", + "/bin/zsh", + ), + ] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() - self.assertEqual(users, [{"username": "jdoe", "uid": 1000, - "enabled": True, "name": "John Doe", - "location": "Everywhere", - "home-phone": None, "work-phone": None, - "primary-gid": 1000}]) + self.assertEqual( + users, + [ + { + "username": "jdoe", + "uid": 1000, + "enabled": True, + "name": "John Doe", + "location": "Everywhere", + "home-phone": None, + "work-phone": None, + "primary-gid": 1000, + }, + ], + ) def test_no_gecos_data(self): """ @@ -119,27 +201,43 @@ data = [("jdoe", "x", 1000, 1000, "", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() - self.assertEqual(users, [{"username": "jdoe", "uid": 1000, - "enabled": True, "name": None, - "location": None, "home-phone": None, - "work-phone": None, - "primary-gid": 1000}]) + self.assertEqual( + users, + [ + { + "username": "jdoe", + "uid": 1000, + "enabled": True, + "name": None, + "location": None, + "home-phone": None, + "work-phone": None, + "primary-gid": 1000, + }, + ], + ) def test_utf8_gecos_data(self): """Gecos fields should be decoded from utf-8 to unicode.""" - name = u"Jos\N{LATIN SMALL LETTER E WITH ACUTE}" - location = u"F\N{LATIN SMALL LETTER I WITH DIAERESIS}nland" - number = u"N\N{LATIN SMALL LETTER AE}ver" - gecos = u"{},{},{},{},".format(name, location, number, number) + name = "Jos\N{LATIN SMALL LETTER E WITH ACUTE}" + location = "F\N{LATIN SMALL LETTER I WITH DIAERESIS}nland" + number = "N\N{LATIN SMALL LETTER AE}ver" + gecos = f"{name},{location},{number},{number}," # We explicitly want to encode this file with utf-8 so we can write in # binary mode and do not rely on the default encoding. - utf8_content = u"""\ + utf8_content = """\ jdoe:x:1000:1000:{}:/home/jdoe:/bin/zsh root:x:0:0:root:/root:/bin/bash -""".format(gecos).encode("utf-8") +""".format( + gecos, + ).encode( + "utf-8", + ) passwd_file = self.makeFile(utf8_content, mode="wb") - provider = UserProvider(passwd_file=passwd_file, - group_file=self.group_file) + provider = UserProvider( + passwd_file=passwd_file, + group_file=self.group_file, + ) users = provider.get_users() self.assertEqual(users[0]["name"], name) self.assertEqual(users[0]["location"], location) @@ -151,13 +249,18 @@ If a GECOS field contains non-UTF8 data, it should be replaced with question marks. 
""" - passwd_file = self.makeFile(b"""\ + passwd_file = self.makeFile( + b"""\ jdoe:x:1000:1000:\255,\255,\255,\255:/home/jdoe:/bin/zsh root:x:0:0:root:/root:/bin/bash -""", mode="wb") - provider = UserProvider(passwd_file=passwd_file, - group_file=self.group_file) - unicode_unknown = u'\N{REPLACEMENT CHARACTER}' +""", + mode="wb", + ) + provider = UserProvider( + passwd_file=passwd_file, + group_file=self.group_file, + ) + unicode_unknown = "\N{REPLACEMENT CHARACTER}" provider = UserProvider(passwd_file=passwd_file, group_file=None) users = provider.get_users() self.assertEqual(users[0]["name"], unicode_unknown) @@ -167,16 +270,38 @@ def test_get_disabled_user(self): """The C{enabled} field should be C{False} for disabled users.""" - data = [("psmith", "x", 1000, 1000, - "Peter Smith,,,,", "/home/psmith", "/bin/bash")] - provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, - locked_users=["psmith"]) - users = provider.get_users() - self.assertEqual(users, [ - {"username": "psmith", "name": u"Peter Smith", "uid": 1000, - "enabled": False, - "location": None, "home-phone": None, "work-phone": None, - "primary-gid": 1000}]) + data = [ + ( + "psmith", + "x", + 1000, + 1000, + "Peter Smith,,,,", + "/home/psmith", + "/bin/bash", + ), + ] + provider = FakeUserProvider( + users=data, + shadow_file=self.shadow_file, + locked_users=["psmith"], + ) + users = provider.get_users() + self.assertEqual( + users, + [ + { + "username": "psmith", + "name": "Peter Smith", + "uid": 1000, + "enabled": False, + "location": None, + "home-phone": None, + "work-phone": None, + "primary-gid": 1000, + }, + ], + ) def test_real_user_data(self): """L{UserProvider} should work with real data.""" @@ -191,40 +316,74 @@ self.assertEqual(user["name"], user_0_name) break else: - self.fail("The user %s (uid=0) was not found in the get_data " - "result." % (user_0.pw_name)) + self.fail( + f"The user {user_0.pw_name} (uid=0) was not found " + "in the get_data result.", + ) def test_get_users_duplicate_usernames(self): """ Get users should return data for all users found on the system, but it should exclude duplicate usernames. """ - data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh"), - ("jdoe", "x", 1001, 1001, "JD,,,,", "/home/jdoe", "/bin/zsh")] + data = [ + ("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh"), + ("jdoe", "x", 1001, 1001, "JD,,,,", "/home/jdoe", "/bin/zsh"), + ] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() - self.assertEqual(users, [{"username": "jdoe", "name": u"JD", - "uid": 1000, "enabled": True, - "location": None, "home-phone": None, - "work-phone": None, "primary-gid": 1000}]) + self.assertEqual( + users, + [ + { + "username": "jdoe", + "name": "JD", + "uid": 1000, + "enabled": True, + "location": None, + "home-phone": None, + "work-phone": None, + "primary-gid": 1000, + }, + ], + ) def test_get_users_duplicate_uids(self): """ Get users should return data for all users found on the system, including users with duplicated uids. 
""" - data = [("joe1", "x", 1000, 1000, "JD,,,,", "/home/joe1", "/bin/zsh"), - ("joe2", "x", 1000, 1000, "JD,,,,", "/home/joe2", "/bin/zsh")] + data = [ + ("joe1", "x", 1000, 1000, "JD,,,,", "/home/joe1", "/bin/zsh"), + ("joe2", "x", 1000, 1000, "JD,,,,", "/home/joe2", "/bin/zsh"), + ] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() - self.assertEqual(users, [{"username": "joe1", "name": u"JD", - "uid": 1000, "enabled": True, - "location": None, "home-phone": None, - "work-phone": None, "primary-gid": 1000}, - {"username": "joe2", "name": u"JD", - "uid": 1000, "enabled": True, - "location": None, "home-phone": None, - "work-phone": None, "primary-gid": 1000}]) + self.assertEqual( + users, + [ + { + "username": "joe1", + "name": "JD", + "uid": 1000, + "enabled": True, + "location": None, + "home-phone": None, + "work-phone": None, + "primary-gid": 1000, + }, + { + "username": "joe2", + "name": "JD", + "uid": 1000, + "enabled": True, + "location": None, + "home-phone": None, + "work-phone": None, + "primary-gid": 1000, + }, + ], + ) def test_user_not_in_shadow_file(self): """ @@ -232,8 +391,9 @@ UserProvider error rather than a KeyError. raises a L{UserProviderError} if a match isn't found. """ - data = [("johndoe", "x", 1000, 1000, - "JD,,,,", "/home/jdoe", "/bin/zsh")] + data = [ + ("johndoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh"), + ] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() self.assertEqual(len(users), 1) @@ -254,17 +414,24 @@ members. """ provider = FakeUserProvider(groups=[("jdoe", "x", 1000, [])]) - self.assertEqual(provider.get_groups(), - [{"name": "jdoe", "gid": 1000, "members": []}]) + self.assertEqual( + provider.get_groups(), + [{"name": "jdoe", "gid": 1000, "members": []}], + ) def test_group_with_members(self): """L{UserProvider.get_groups} should include groups with members.""" users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("sales", "x", 50, ["jdoe"])] - provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, - groups=groups) - self.assertEqual(provider.get_groups(), - [{"name": "sales", "gid": 50, "members": ["jdoe"]}]) + provider = FakeUserProvider( + users=users, + shadow_file=self.shadow_file, + groups=groups, + ) + self.assertEqual( + provider.get_groups(), + [{"name": "sales", "gid": 50, "members": ["jdoe"]}], + ) def test_group_with_unknown_members(self): """L{UserProvider.get_groups} should include groups with members. 
@@ -274,10 +441,15 @@ """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("sales", "x", 50, ["jdoe", "kevin"])] - provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, - groups=groups) - self.assertEqual(provider.get_groups(), - [{"name": "sales", "gid": 50, "members": ["jdoe"]}]) + provider = FakeUserProvider( + users=users, + shadow_file=self.shadow_file, + groups=groups, + ) + self.assertEqual( + provider.get_groups(), + [{"name": "sales", "gid": 50, "members": ["jdoe"]}], + ) def test_group_with_duplicate_members(self): """ @@ -288,10 +460,15 @@ """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("sales", "x", 50, ["jdoe", "jdoe"])] - provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, - groups=groups) - self.assertEqual(provider.get_groups(), - [{"name": "sales", "gid": 50, "members": ["jdoe"]}]) + provider = FakeUserProvider( + users=users, + shadow_file=self.shadow_file, + groups=groups, + ) + self.assertEqual( + provider.get_groups(), + [{"name": "sales", "gid": 50, "members": ["jdoe"]}], + ) def test_group_with_duplicate_groupnames(self): """ @@ -301,13 +478,19 @@ reported to the server. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] - groups = [("sales", "x", 50, ["jdoe"]), - ("sales", "x", 51, ["jdoe"]), - ] - provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, - groups=groups) - self.assertEqual(provider.get_groups(), - [{"name": "sales", "gid": 50, "members": ["jdoe"]}]) + groups = [ + ("sales", "x", 50, ["jdoe"]), + ("sales", "x", 51, ["jdoe"]), + ] + provider = FakeUserProvider( + users=users, + shadow_file=self.shadow_file, + groups=groups, + ) + self.assertEqual( + provider.get_groups(), + [{"name": "sales", "gid": 50, "members": ["jdoe"]}], + ) def test_real_group_data(self): """ @@ -324,82 +507,127 @@ self.assertEqual(group["members"], group_0.gr_mem) break else: - self.fail("The group %s (gid=0) was not found in the get_data " - "result." % (group_0.gr_name,)) + self.fail( + f"The group {group_0.gr_name} (gid=0) was not found " + "in the get_data result.", + ) def test_get_user_data(self): """This tests the functionality for parsing /etc/passwd style files.""" - provider = UserProvider(passwd_file=self.passwd_file, - group_file=self.group_file) + provider = UserProvider( + passwd_file=self.passwd_file, + group_file=self.group_file, + ) users = provider.get_user_data() - self.assertEqual(users[0], ("root", "x", 0, 0, "root", "/root", - "/bin/bash")) - self.assertEqual(users[1], ("haldaemon", "x", 107, 116, - "Hardware abstraction layer,,,", - "/home/haldaemon", "/bin/false")) - self.assertEqual(users[2], ("kevin", "x", 1001, 65534, - "Kevin,101,+44123123,+44123124", - "/home/kevin", "/bin/bash")) + self.assertEqual( + users[0], + ("root", "x", 0, 0, "root", "/root", "/bin/bash"), + ) + self.assertEqual( + users[1], + ( + "haldaemon", + "x", + 107, + 116, + "Hardware abstraction layer,,,", + "/home/haldaemon", + "/bin/false", + ), + ) + self.assertEqual( + users[2], + ( + "kevin", + "x", + 1001, + 65534, + "Kevin,101,+44123123,+44123124", + "/home/kevin", + "/bin/bash", + ), + ) def test_get_users_with_many(self): """ The method get_users is responsible for translating tuples of information from the underlying user database into dictionaries. 
""" - provider = UserProvider(passwd_file=self.passwd_file, - group_file=self.group_file) - users = provider.get_users() - self.assertEqual(users[0], {"username": "root", - "name": u"root", - "uid": 0, "enabled": True, - "location": None, - "home-phone": None, - "work-phone": None, - "primary-gid": 0}) - self.assertEqual(users[1], {"username": "haldaemon", - "name": u"Hardware abstraction layer", - "uid": 107, - "enabled": True, - "location": None, - "home-phone": None, - "work-phone": None, - "primary-gid": 116}) - self.assertEqual(users[2], {"username": "kevin", - "name": u"Kevin", - "uid": 1001, - "enabled": True, - "location": u"101", - "home-phone": u"+44123124", - "work-phone": u"+44123123", - "primary-gid": 65534}) + provider = UserProvider( + passwd_file=self.passwd_file, + group_file=self.group_file, + ) + users = provider.get_users() + self.assertEqual( + users[0], + { + "username": "root", + "name": "root", + "uid": 0, + "enabled": True, + "location": None, + "home-phone": None, + "work-phone": None, + "primary-gid": 0, + }, + ) + self.assertEqual( + users[1], + { + "username": "haldaemon", + "name": "Hardware abstraction layer", + "uid": 107, + "enabled": True, + "location": None, + "home-phone": None, + "work-phone": None, + "primary-gid": 116, + }, + ) + self.assertEqual( + users[2], + { + "username": "kevin", + "name": "Kevin", + "uid": 1001, + "enabled": True, + "location": "101", + "home-phone": "+44123124", + "work-phone": "+44123123", + "primary-gid": 65534, + }, + ) def test_get_group_data(self): """This tests the functionality for parsing /etc/group style files.""" - provider = UserProvider(passwd_file=self.passwd_file, - group_file=self.group_file) + provider = UserProvider( + passwd_file=self.passwd_file, + group_file=self.group_file, + ) groups = provider.get_group_data() - self.assertEqual(groups[0], (u"root", u"x", 0, [u""])) - self.assertEqual(groups[1], (u"cdrom", u"x", 24, - [u"haldaemon", u"kevin"])) - self.assertEqual(groups[2], (u"kevin", u"x", 1000, [u""])) + self.assertEqual(groups[0], ("root", "x", 0, [""])) + self.assertEqual(groups[1], ("cdrom", "x", 24, ["haldaemon", "kevin"])) + self.assertEqual(groups[2], ("kevin", "x", 1000, [""])) def test_get_groups(self): """ The method get_groups is responsible for translating tuples of data from the underlying userdatabase into dictionaries. """ - provider = UserProvider(passwd_file=self.passwd_file, - group_file=self.group_file) + provider = UserProvider( + passwd_file=self.passwd_file, + group_file=self.group_file, + ) groups = provider.get_groups() - self.assertEqual(groups[0], {"name": u"root", - "gid": 0, - "members": []}) - self.assertEqual(groups[1], {"name": u"cdrom", - "gid": 24, - "members": [u"haldaemon", u"kevin"]}) - self.assertEqual(groups[2], {"name": u"kevin", - "gid": 1000, - "members": []}) + self.assertEqual(groups[0], {"name": "root", "gid": 0, "members": []}) + self.assertEqual( + groups[1], + {"name": "cdrom", "gid": 24, "members": ["haldaemon", "kevin"]}, + ) + self.assertEqual( + groups[2], + {"name": "kevin", "gid": 1000, "members": []}, + ) def test_get_users_incorrect_passwd_file(self): """ @@ -408,41 +636,62 @@ Incorrectly formatted lines according to passwd(5) should be ignored during processing. 
""" - passwd_file = self.makeFile("""\ + passwd_file = self.makeFile( + """\ root:x:0:0:root:/root:/bin/bash broken haldaemon:x:107:Hardware abstraction layer,,,:/home/haldaemon:/bin/false kevin:x:1001:65534:Kevin,101,+44123123,+44123124:/home/kevin:/bin/bash +:::::: broken2 -""") +""", + ) - provider = UserProvider(passwd_file=passwd_file, - group_file=self.group_file) - users = provider.get_users() - self.assertEqual(users[0], {"username": "root", - "name": u"root", - "uid": 0, "enabled": True, - "location": None, - "home-phone": None, - "work-phone": None, - "primary-gid": 0}) - self.assertEqual(users[1], {"username": "kevin", - "name": u"Kevin", - "uid": 1001, - "enabled": True, - "location": u"101", - "home-phone": u"+44123124", - "work-phone": u"+44123123", - "primary-gid": 65534}) - log1 = ("WARNING: passwd file %s is incorrectly formatted: line 2." % - passwd_file) + provider = UserProvider( + passwd_file=passwd_file, + group_file=self.group_file, + ) + users = provider.get_users() + self.assertEqual( + users[0], + { + "username": "root", + "name": "root", + "uid": 0, + "enabled": True, + "location": None, + "home-phone": None, + "work-phone": None, + "primary-gid": 0, + }, + ) + self.assertEqual( + users[1], + { + "username": "kevin", + "name": "Kevin", + "uid": 1001, + "enabled": True, + "location": "101", + "home-phone": "+44123124", + "work-phone": "+44123123", + "primary-gid": 65534, + }, + ) + log1 = ( + f"WARNING: passwd file {passwd_file} is incorrectly " + "formatted: line 2." + ) self.assertIn(log1, self.logfile.getvalue()) - log2 = ("WARNING: passwd file %s is incorrectly formatted: line 3." % - passwd_file) + log2 = ( + f"WARNING: passwd file {passwd_file} is incorrectly " + "formatted: line 3." + ) self.assertIn(log2, self.logfile.getvalue()) - log3 = ("WARNING: passwd file %s is incorrectly formatted: line 6." % - passwd_file) + log3 = ( + f"WARNING: passwd file {passwd_file} is incorrectly " + "formatted: line 6." + ) self.assertIn(log3, self.logfile.getvalue()) def test_get_users_nis_line(self): @@ -452,35 +701,49 @@ We should ignore the specific pattern for NIS user-extensions in passwd files. 
""" - passwd_file = self.makeFile("""\ + passwd_file = self.makeFile( + """\ root:x:0:0:root:/root:/bin/bash kevin:x:1001:65534:Kevin,101,+44123123,+44123124:/home/kevin:/bin/bash +jkakar:::::: -radix:::::: +:::::: -""") +""", + ) - provider = UserProvider(passwd_file=passwd_file, - group_file=self.group_file) + provider = UserProvider( + passwd_file=passwd_file, + group_file=self.group_file, + ) users = provider.get_users() self.assertTrue(len(users), 2) - self.assertEqual(users[0], {"username": "root", - "name": u"root", - "uid": 0, "enabled": True, - "location": None, - "home-phone": None, - "work-phone": None, - "primary-gid": 0}) - self.assertEqual(users[1], {"username": "kevin", - "name": u"Kevin", - "uid": 1001, - "enabled": True, - "location": u"101", - "home-phone": u"+44123124", - "work-phone": u"+44123123", - "primary-gid": 65534}) - log = ("WARNING: passwd file %s is incorrectly formatted" % - passwd_file) + self.assertEqual( + users[0], + { + "username": "root", + "name": "root", + "uid": 0, + "enabled": True, + "location": None, + "home-phone": None, + "work-phone": None, + "primary-gid": 0, + }, + ) + self.assertEqual( + users[1], + { + "username": "kevin", + "name": "Kevin", + "uid": 1001, + "enabled": True, + "location": "101", + "home-phone": "+44123124", + "work-phone": "+44123123", + "primary-gid": 65534, + }, + ) + log = f"WARNING: passwd file {passwd_file} is incorrectly formatted" self.assertTrue(log not in self.logfile.getvalue()) def test_get_groups_incorrect_groups_file(self): @@ -490,20 +753,27 @@ Incorrectly formatted lines according to group(5) should be ignored during processing. """ - group_file = self.makeFile("""\ + group_file = self.makeFile( + """\ root:x:0: cdrom:x:24: kevin:x:kevin: -""") - provider = UserProvider(passwd_file=self.passwd_file, - group_file=group_file) +""", + ) + provider = UserProvider( + passwd_file=self.passwd_file, + group_file=group_file, + ) groups = provider.get_groups() - self.assertEqual(groups[0], {"name": u"root", "gid": 0, - "members": []}) - self.assertEqual(groups[1], {"name": u"cdrom", "gid": 24, - "members": []}) - log = ("WARNING: group file %s is incorrectly " - "formatted: line 3." % group_file) + self.assertEqual(groups[0], {"name": "root", "gid": 0, "members": []}) + self.assertEqual( + groups[1], + {"name": "cdrom", "gid": 24, "members": []}, + ) + log = ( + f"WARNING: group file {group_file} is incorrectly " + "formatted: line 3." + ) self.assertIn(log, self.logfile.getvalue()) def test_get_groups_nis_line(self): @@ -513,17 +783,20 @@ We should ignore the specific pattern for NIS user-extensions in group files. 
""" - group_file = self.makeFile("""\ + group_file = self.makeFile( + """\ root:x:0: cdrom:x:24: +jkakar::: -radix::: +::: -""") - provider = UserProvider(passwd_file=self.passwd_file, - group_file=group_file) +""", + ) + provider = UserProvider( + passwd_file=self.passwd_file, + group_file=group_file, + ) groups = provider.get_groups() - self.assertEqual(groups[0], {"name": u"root", "gid": 0, - "members": []}) - log = ("WARNING: group file %s is incorrectly formatted" % group_file) + self.assertEqual(groups[0], {"name": "root", "gid": 0, "members": []}) + log = f"WARNING: group file {group_file} is incorrectly formatted" self.assertTrue(log not in self.logfile.getvalue()) diff -Nru landscape-client-23.02/landscape/client/watchdog.py landscape-client-23.08/landscape/client/watchdog.py --- landscape-client-23.02/landscape/client/watchdog.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/client/watchdog.py 2023-08-17 21:19:32.000000000 +0000 @@ -4,35 +4,41 @@ The main C{landscape-client} program uses this watchdog. """ - -import os import errno -import sys +import os import pwd import signal +import sys import time +from logging import error +from logging import info +from logging import warning +from resource import RLIMIT_NOFILE +from resource import setrlimit -from logging import warning, info, error -from resource import setrlimit, RLIMIT_NOFILE - +from twisted.application.app import startApplication +from twisted.application.service import Application +from twisted.application.service import Service from twisted.internet import reactor -from twisted.internet.defer import Deferred, succeed -from twisted.internet.protocol import ProcessProtocol +from twisted.internet.defer import Deferred +from twisted.internet.defer import succeed from twisted.internet.error import ProcessExitedAlready -from twisted.application.service import Service, Application -from twisted.application.app import startApplication +from twisted.internet.protocol import ProcessProtocol -from landscape.client.deployment import init_logging, Configuration +from landscape.client.broker.amp import RemoteBrokerConnector +from landscape.client.broker.amp import RemoteManagerConnector +from landscape.client.broker.amp import RemoteMonitorConnector +from landscape.client.deployment import Configuration +from landscape.client.deployment import init_logging +from landscape.client.reactor import LandscapeReactor +from landscape.lib.bootstrap import BootstrapDirectory +from landscape.lib.bootstrap import BootstrapFile +from landscape.lib.bootstrap import BootstrapList from landscape.lib.config import get_bindir from landscape.lib.encoding import encode_values -from landscape.lib.twisted_util import gather_results from landscape.lib.log import log_failure from landscape.lib.logging import rotate_logs -from landscape.lib.bootstrap import (BootstrapList, BootstrapFile, - BootstrapDirectory) -from landscape.client.broker.amp import ( - RemoteBrokerConnector, RemoteMonitorConnector, RemoteManagerConnector) -from landscape.client.reactor import LandscapeReactor +from landscape.lib.twisted_util import gather_results GRACEFUL_WAIT_PERIOD = 10 MAXIMUM_CONSECUTIVE_RESTARTS = 5 @@ -52,7 +58,7 @@ """An executable was not found.""" -class Daemon(object): +class Daemon: """A Landscape daemon which can be started and tracked. This class should be subclassed to specify individual daemon. 
@@ -79,8 +85,7 @@ BIN_DIR = None - def __init__(self, connector, reactor=reactor, verbose=False, - config=None): + def __init__(self, connector, reactor=reactor, verbose=False, config=None): self._connector = connector self._reactor = reactor self._env = os.environ.copy() @@ -121,7 +126,7 @@ dirname = self.BIN_DIR or get_bindir() executable = os.path.join(dirname, self.program) if not os.path.exists(executable): - raise ExecutableNotFoundError("%s doesn't exist" % (executable,)) + raise ExecutableNotFoundError(f"{executable} doesn't exist") return executable def start(self): @@ -132,7 +137,7 @@ if self._last_started + RESTART_BURST_DELAY > now: self._quick_starts += 1 if self._quick_starts == MAXIMUM_CONSECUTIVE_RESTARTS: - error("Can't keep %s running. Exiting." % self.program) + error(f"Can't keep {self.program} running. Exiting.") self._reactor.stop() return else: @@ -150,8 +155,14 @@ if self.options is not None: args.extend(self.options) env = encode_values(self._env) - self._reactor.spawnProcess(self._process, exe, args=args, - env=env, uid=self._uid, gid=self._gid) + self._reactor.spawnProcess( + self._process, + exe, + args=args, + env=env, + uid=self._uid, + gid=self._gid, + ) def stop(self): """Stop this daemon.""" @@ -174,8 +185,11 @@ self._connector.disconnect() return True - connected = self._connector.connect(self.max_retries, self.factor, - quiet=True) + connected = self._connector.connect( + self.max_retries, + self.factor, + quiet=True, + ) connected.addCallback(lambda remote: getattr(remote, name)()) connected.addCallback(disconnect) connected.addErrback(lambda x: False) @@ -258,8 +272,9 @@ def _terminate(self, warn=False): if self.transport is not None: if warn: - warning("%s didn't exit. Sending SIGTERM" - % (self.daemon.program,)) + warning( + f"{self.daemon.program} didn't exit. Sending SIGTERM", + ) try: self.transport.signalProcess(signal.SIGTERM) except ProcessExitedAlready: @@ -275,7 +290,7 @@ except ProcessExitedAlready: pass else: - warning("%s didn't die. Sending SIGKILL." % self.daemon.program) + warning(f"{self.daemon.program} didn't die. Sending SIGKILL.") self._delayed_really_kill = None def rotate_logs(self): @@ -292,23 +307,28 @@ return self._wait_result def wait_or_die(self): - self._delayed_terminate = reactor.callLater(GRACEFUL_WAIT_PERIOD, - self._terminate, warn=True) + self._delayed_terminate = reactor.callLater( + GRACEFUL_WAIT_PERIOD, + self._terminate, + warn=True, + ) return self.wait() - def outReceived(self, data): + def outReceived(self, data): # noqa: N802 # it's *probably* going to always be line buffered, by accident sys.stdout.buffer.write(data) - def errReceived(self, data): + def errReceived(self, data): # noqa: N802 sys.stderr.buffer.write(data) - def processEnded(self, reason): + def processEnded(self, reason): # noqa: N802 """The process has ended; restart it.""" if self._delayed_really_kill is not None: self._delayed_really_kill.cancel() - if (self._delayed_terminate is not None and - self._delayed_terminate.active()): + if ( + self._delayed_terminate is not None + and self._delayed_terminate.active() + ): self._delayed_terminate.cancel() if self._wait_result is not None: self._wait_result.callback(None) @@ -316,47 +336,68 @@ self.daemon.start() -class WatchDog(object): +class WatchDog: """ The Landscape WatchDog starts all other landscape daemons and ensures that they are working. 
""" - def __init__(self, reactor=reactor, verbose=False, config=None, - broker=None, monitor=None, manager=None, - enabled_daemons=None): + def __init__( + self, + reactor=reactor, + verbose=False, + config=None, + broker=None, + monitor=None, + manager=None, + enabled_daemons=None, + ): landscape_reactor = LandscapeReactor() if enabled_daemons is None: enabled_daemons = [Broker, Monitor, Manager] if broker is None and Broker in enabled_daemons: broker = Broker( RemoteBrokerConnector(landscape_reactor, config), - verbose=verbose, config=config.config) + verbose=verbose, + config=config.config, + ) if monitor is None and Monitor in enabled_daemons: monitor = Monitor( RemoteMonitorConnector(landscape_reactor, config), - verbose=verbose, config=config.config) + verbose=verbose, + config=config.config, + ) if manager is None and Manager in enabled_daemons: manager = Manager( RemoteManagerConnector(landscape_reactor, config), - verbose=verbose, config=config.config) + verbose=verbose, + config=config.config, + ) self.broker = broker self.monitor = monitor self.manager = manager - self.daemons = [daemon - for daemon in [self.broker, self.monitor, self.manager] - if daemon] + self.daemons = [ + daemon + for daemon in [self.broker, self.monitor, self.manager] + if daemon + ] self.reactor = reactor self._checking = None self._stopping = False signal.signal( signal.SIGUSR1, lambda signal, frame: reactor.callFromThread( - self._notify_rotate_logs)) + self._notify_rotate_logs, + ), + ) if config is not None and config.clones > 0: - options = ["--clones", str(config.clones), - "--start-clones-over", str(config.start_clones_over)] + options = [ + "--clones", + str(config.clones), + "--start-clones-over", + str(config.start_clones_over), + ] for daemon in self.daemons: daemon.options = options @@ -375,6 +416,7 @@ def got_all_results(r): return [x[1] for x in r if x[0]] + return gather_results(results).addCallback(got_all_results) def start(self): @@ -399,18 +441,18 @@ def _restart_if_not_running(self, is_running, daemon): if (not is_running) and (not self._stopping): - warning("%s failed to respond to a ping." - % (daemon.program,)) + warning(f"{daemon.program} failed to respond to a ping.") if daemon not in self._ping_failures: self._ping_failures[daemon] = 0 self._ping_failures[daemon] += 1 if self._ping_failures[daemon] == 5: - warning("%s died! Restarting." % (daemon.program,)) + warning(f"{daemon.program} died! Restarting.") stopping = daemon.stop() def stopped(ignored): daemon.start() self._ping_failures[daemon] = 0 + stopping.addBoth(stopped) return stopping else: @@ -425,6 +467,7 @@ def reschedule(ignored): self._checking = self.reactor.callLater(5, self._check) + gather_results(all_running).addBoth(reschedule) def request_exit(self): @@ -444,8 +487,10 @@ else: # If request_exit fails, we should just kill the daemons # immediately. 
- error("Couldn't request that broker gracefully shut down; " - "killing forcefully.") + error( + "Couldn't request that broker gracefully shut down; " + "killing forcefully.", + ) results = [x.stop() for x in self.daemons] return gather_results(results) @@ -459,17 +504,25 @@ class WatchDogConfiguration(Configuration): - def make_parser(self): - parser = super(WatchDogConfiguration, self).make_parser() - parser.add_option("--daemon", action="store_true", - help="Fork and run in the background.") - parser.add_option("--pid-file", type="str", - help="The file to write the PID to.") - parser.add_option("--monitor-only", action="store_true", - help="Don't enable management features. This is " - "useful if you want to run the client as a non-root " - "user.") + parser = super().make_parser() + parser.add_option( + "--daemon", + action="store_true", + help="Fork and run in the background.", + ) + parser.add_option( + "--pid-file", + type="str", + help="The file to write the PID to.", + ) + parser.add_option( + "--monitor-only", + action="store_true", + help="Don't enable management features. This is " + "useful if you want to run the client as a non-root " + "user.", + ) return parser def get_enabled_daemons(self): @@ -488,7 +541,7 @@ os._exit(0) # kill off parent again. # some argue that this umask should be 0, but that's annoying. os.umask(0o077) - null = os.open('/dev/null', os.O_RDWR) + null = os.open("/dev/null", os.O_RDWR) for i in range(3): try: os.dup2(null, i) @@ -499,44 +552,55 @@ class WatchDogService(Service): - def __init__(self, config): self._config = config - self.watchdog = WatchDog(verbose=not config.daemon, - config=config, - enabled_daemons=config.get_enabled_daemons()) + self.watchdog = WatchDog( + verbose=not config.daemon, + config=config, + enabled_daemons=config.get_enabled_daemons(), + ) self.exit_code = 0 - def startService(self): + def startService(self): # noqa: N802 Service.startService(self) - bootstrap_list.bootstrap(data_path=self._config.data_path, - log_dir=self._config.log_dir) + bootstrap_list.bootstrap( + data_path=self._config.data_path, + log_dir=self._config.log_dir, + ) if self._config.clones > 0: # Let clones open an appropriate number of fds - setrlimit(RLIMIT_NOFILE, (self._config.clones * 100, - self._config.clones * 200)) + setrlimit( + RLIMIT_NOFILE, + (self._config.clones * 100, self._config.clones * 200), + ) # Increase the timeout of AMP's MethodCalls. # XXX: we should find a better way to expose this knot, and # not set it globally on the class from landscape.lib.amp import MethodCallSender + MethodCallSender.timeout = 300 # Create clones log and data directories for i in range(self._config.clones): - suffix = "-clone-%d" % i + suffix = f"-clone-{i:d}" bootstrap_list.bootstrap( data_path=self._config.data_path + suffix, - log_dir=self._config.log_dir + suffix) + log_dir=self._config.log_dir + suffix, + ) result = succeed(None) result.addCallback(lambda _: self.watchdog.check_running()) def start_if_not_running(running_daemons): if running_daemons: - error("ERROR: The following daemons are already running: %s" - % (", ".join(x.program for x in running_daemons))) + error( + "ERROR: The following daemons are already " + "running: {}".format( + ", ".join(x.program for x in running_daemons), + ), + ) self.exit_code = 1 reactor.crash() # so stopService isn't called. 
return @@ -548,6 +612,7 @@ log_failure(failure, "Unknown error occurred!") self.exit_code = 2 reactor.crash() + result.addCallback(start_if_not_running) result.addErrback(die) return result @@ -560,7 +625,7 @@ stream.write(str(os.getpid())) stream.close() - def stopService(self): + def stopService(self): # noqa: N802 info("Stopping client...") Service.stopService(self) @@ -582,21 +647,45 @@ os.unlink(pid_file) -bootstrap_list = BootstrapList([ - BootstrapDirectory("$data_path", "landscape", "root", 0o755), - BootstrapDirectory("$data_path/package", "landscape", "root", 0o755), - BootstrapDirectory( - "$data_path/package/hash-id", "landscape", "root", 0o755), - BootstrapDirectory( - "$data_path/package/binaries", "landscape", "root", 0o755), - BootstrapDirectory( - "$data_path/package/upgrade-tool", "landscape", "root", 0o755), - BootstrapDirectory("$data_path/messages", "landscape", "root", 0o755), - BootstrapDirectory("$data_path/sockets", "landscape", "root", 0o750), - BootstrapDirectory( - "$data_path/custom-graph-scripts", "landscape", "root", 0o755), - BootstrapDirectory("$log_dir", "landscape", "root", 0o755), - BootstrapFile("$data_path/package/database", "landscape", "root", 0o644)]) +bootstrap_list = BootstrapList( + [ + BootstrapDirectory("$data_path", "landscape", "root", 0o755), + BootstrapDirectory("$data_path/package", "landscape", "root", 0o755), + BootstrapDirectory( + "$data_path/package/hash-id", + "landscape", + "root", + 0o755, + ), + BootstrapDirectory( + "$data_path/package/binaries", + "landscape", + "root", + 0o755, + ), + BootstrapDirectory( + "$data_path/package/upgrade-tool", + "landscape", + "root", + 0o755, + ), + BootstrapDirectory("$data_path/messages", "landscape", "root", 0o755), + BootstrapDirectory("$data_path/sockets", "landscape", "root", 0o750), + BootstrapDirectory( + "$data_path/custom-graph-scripts", + "landscape", + "root", + 0o755, + ), + BootstrapDirectory("$log_dir", "landscape", "root", 0o755), + BootstrapFile( + "$data_path/package/database", + "landscape", + "root", + 0o644, + ), + ], +) def clean_environment(): @@ -608,8 +697,10 @@ *other* maintainer scripts which landscape-client invokes. """ for key in list(os.environ.keys()): - if (key.startswith(("DEBIAN_", "DEBCONF_")) or - key in ["LANDSCAPE_ATTACHMENTS", "MAIL"]): + if key.startswith(("DEBIAN_", "DEBCONF_")) or key in [ + "LANDSCAPE_ATTACHMENTS", + "MAIL", + ]: del os.environ[key] diff -Nru landscape-client-23.02/landscape/constants.py landscape-client-23.08/landscape/constants.py --- landscape-client-23.02/landscape/constants.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/constants.py 2023-08-17 21:19:32.000000000 +0000 @@ -8,8 +8,16 @@ # The name "UBUNTU" is used in the variable name due to the fact that the path # is Ubuntu-specific, taken from /etc/login.defs. UBUNTU_PATH = ":".join( - ["/usr/local/sbin", "/usr/local/bin", "/usr/sbin", "/usr/bin", "/sbin", - "/bin", "/snap/bin"]) + [ + "/usr/local/sbin", + "/usr/local/bin", + "/usr/sbin", + "/usr/bin", + "/sbin", + "/bin", + "/snap/bin", + ], +) SUCCESS_RESULT = 1 ERROR_RESULT = 100 @@ -33,3 +41,8 @@ # So we'll give the problem one chance to resolve itself, by only waiting for # one run of apt-update. UNKNOWN_PACKAGE_DATA_TIMEOUT = 70 * 60 + +# Until we modernize config-parsing, here are all the string values we +# accept as `False` in the conf file. Any other value is `True`, +# conventionally. 
+FALSE_VALUES = (False, "False", "false", 0, "no") diff -Nru landscape-client-23.02/landscape/__init__.py landscape-client-23.08/landscape/__init__.py --- landscape-client-23.02/landscape/__init__.py 2023-02-08 18:16:34.000000000 +0000 +++ landscape-client-23.08/landscape/__init__.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,6 +1,6 @@ DEBIAN_REVISION = "" -UPSTREAM_VERSION = "23.02" -VERSION = "%s%s" % (UPSTREAM_VERSION, DEBIAN_REVISION) +UPSTREAM_VERSION = "23.08" +VERSION = f"{UPSTREAM_VERSION}{DEBIAN_REVISION}" # The minimum server API version that all Landscape servers are known to speak # and support. It can serve as fallback in case higher versions are not there. diff -Nru landscape-client-23.02/landscape/lib/amp.py landscape-client-23.08/landscape/lib/amp.py --- landscape-client-23.02/landscape/lib/amp.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/amp.py 2023-08-17 21:19:32.000000000 +0000 @@ -11,7 +11,7 @@ class Greeter(object): def hello(self, name): - return "hi %s!" % name + return f"hi {name}!" greeter = Greeter() @@ -46,13 +46,20 @@ """ from uuid import uuid4 -from twisted.internet.defer import Deferred, maybeDeferred, succeed -from twisted.internet.protocol import ServerFactory, ReconnectingClientFactory -from twisted.python.failure import Failure +from twisted.internet.defer import Deferred +from twisted.internet.defer import maybeDeferred +from twisted.internet.defer import succeed +from twisted.internet.protocol import ReconnectingClientFactory +from twisted.internet.protocol import ServerFactory +from twisted.protocols.amp import AMP +from twisted.protocols.amp import Argument +from twisted.protocols.amp import Command +from twisted.protocols.amp import CommandLocator +from twisted.protocols.amp import Integer +from twisted.protocols.amp import MAX_VALUE_LENGTH +from twisted.protocols.amp import String from twisted.python.compat import xrange - -from twisted.protocols.amp import ( - Argument, String, Integer, Command, AMP, MAX_VALUE_LENGTH, CommandLocator) +from twisted.python.failure import Failure from landscape.lib import bpickle @@ -60,18 +67,18 @@ class MethodCallArgument(Argument): """A bpickle-compatible argument.""" - def toString(self, inObject): + def toString(self, inobject): # noqa: N802 """Serialize an argument.""" - return bpickle.dumps(inObject) + return bpickle.dumps(inobject) - def fromString(self, inString): + def fromString(self, instring): # noqa: N802 """Unserialize an argument.""" - return bpickle.loads(inString) + return bpickle.loads(instring) @classmethod - def check(cls, inObject): + def check(cls, inobject): """Check if an argument is serializable.""" - return type(inObject) in bpickle.dumps_table + return type(inobject) in bpickle.dumps_table class MethodCallError(Exception): @@ -96,9 +103,11 @@ and C{kwargs} the keyword ones. """ - arguments = [(b"sequence", Integer()), - (b"method", String()), - (b"arguments", String())] + arguments = [ + (b"sequence", Integer()), + (b"method", String()), + (b"arguments", String()), + ] response = [(b"result", MethodCallArgument())] @@ -120,8 +129,7 @@ being split and buffered. """ - arguments = [(b"sequence", Integer()), - (b"chunk", String())] + arguments = [(b"sequence", Integer()), (b"chunk", String())] response = [(b"result", Integer())] @@ -172,7 +180,7 @@ # it here again. 
method = method.decode("utf-8") if method not in self._methods: - raise MethodCallError("Forbidden method '%s'" % method) + raise MethodCallError(f"Forbidden method '{method}'") method_func = getattr(self._object, method) @@ -208,7 +216,7 @@ return result -class MethodCallSender(object): +class MethodCallSender: """Call methods on a remote object over L{AMP} and return the result. @param protocol: A connected C{AMP} protocol. @@ -216,6 +224,7 @@ @ivar timeout: A timeout for remote method class, see L{send_method_call}. """ + timeout = 60 _chunk_size = MAX_VALUE_LENGTH @@ -271,8 +280,10 @@ method = method.encode("utf-8") # Split the given arguments in one or more chunks - chunks = [arguments[i:i + self._chunk_size] - for i in xrange(0, len(arguments), self._chunk_size)] + chunks = [ + arguments[i : i + self._chunk_size] + for i in xrange(0, len(arguments), self._chunk_size) + ] result = Deferred() if len(chunks) > 1: @@ -280,8 +291,13 @@ for chunk in chunks[:-1]: def create_send_chunk(sequence, chunk): - send_chunk = (lambda x: self._protocol.callRemote( - MethodCallChunk, sequence=sequence, chunk=chunk)) + def send_chunk(x): + return self._protocol.callRemote( + MethodCallChunk, + sequence=sequence, + chunk=chunk, + ) + return send_chunk result.addCallback(create_send_chunk(sequence, chunk)) @@ -289,7 +305,11 @@ def send_last_chunk(ignored): chunk = chunks[-1] return self._call_remote_with_timeout( - MethodCall, sequence=sequence, method=method, arguments=chunk) + MethodCall, + sequence=sequence, + method=method, + arguments=chunk, + ) result.addCallback(send_last_chunk) result.addCallback(lambda response: response["result"]) @@ -309,13 +329,13 @@ factory = None - def connectionMade(self): + def connectionMade(self): # noqa: N802 """Notify our factory that we're ready to go.""" if self.factory is not None: # Factory can be None in unit-tests self.factory.clientConnectionMade(self) -class RemoteObject(object): +class RemoteObject: """An object able to transparently call methods on a remote object. Any method call on a L{RemoteObject} instance will return a L{Deferred} @@ -342,6 +362,7 @@ keyword arguments it was called with, and returning a L{Deferred} resulting in the L{MethodCall}'s response value. """ + def send_method_call(*args, **kwargs): deferred = Deferred() self._send_method_call(method, args, kwargs, deferred) @@ -351,12 +372,20 @@ def _send_method_call(self, method, args, kwargs, deferred, call=None): """Send a L{MethodCall} command, adding callbacks to handle retries.""" - result = self._sender.send_method_call(method=method, - args=args, - kwargs=kwargs) + result = self._sender.send_method_call( + method=method, + args=args, + kwargs=kwargs, + ) result.addCallback(self._handle_result, deferred, call=call) - result.addErrback(self._handle_failure, method, args, kwargs, - deferred, call=call) + result.addErrback( + self._handle_failure, + method, + args, + kwargs, + deferred, + call=call, + ) if self._factory.fake_connection is not None: # Transparently flush the connection after a send_method_call @@ -377,8 +406,15 @@ call.cancel() # This is a successful retry, cancel the timeout. deferred.callback(result) - def _handle_failure(self, failure, method, args, kwargs, deferred, - call=None): + def _handle_failure( + self, + failure, + method, + args, + kwargs, + deferred, + call=None, + ): """Called when a L{MethodCall} command fails. 
        If a failure is due to a connection error and if C{retry_on_reconnect}
@@ -413,10 +449,15 @@
             # This is the first failure for this request, let's schedule a
             # timeout call.
             timeout = Failure(MethodCallError("timeout"))
-            call = self._factory.clock.callLater(self._factory.retryTimeout,
-                                                 self._handle_failure,
-                                                 timeout, method, args,
-                                                 kwargs, deferred=deferred)
+            call = self._factory.clock.callLater(
+                self._factory.retryTimeout,
+                self._handle_failure,
+                timeout,
+                method,
+                args,
+                kwargs,
+                deferred=deferred,
+            )
 
         self._pending_requests[deferred] = (method, args, kwargs, call)
 
@@ -459,7 +500,7 @@
         self.object = obj
         self.methods = methods
 
-    def buildProtocol(self, addr):
+    def buildProtocol(self, addr):  # noqa: N802
         protocol = self.protocol(self.object, self.methods)
         protocol.factory = self
         return protocol
@@ -487,13 +528,13 @@
     """
 
     factor = 1.6180339887498948
-    maxDelay = 30
+    maxDelay = 30  # noqa: N815
 
     protocol = MethodCallClientProtocol
 
     remote = RemoteObject
 
-    retryOnReconnect = False
-    retryTimeout = None
+    retryOnReconnect = False  # noqa: N815
+    retryTimeout = None  # noqa: N815
 
     # XXX support exposing fake asynchronous connections created by tests, so
     # they can be flushed transparently and emulate a synchronous behavior. See
@@ -514,7 +555,7 @@
         self._requests = []
         self._remote = None
 
-    def getRemoteObject(self):
+    def getRemoteObject(self):  # noqa: N802
         """Get a L{RemoteObject} as soon as the connection is ready.
 
         @return: A C{Deferred} firing with a connected L{RemoteObject}.
@@ -525,15 +566,15 @@
         self._requests.append(deferred)
         return deferred
 
-    def notifyOnConnect(self, callback):
+    def notifyOnConnect(self, callback):  # noqa: N802
         """Invoke the given C{callback} when a connection is re-established."""
         self._connects.append(callback)
 
-    def dontNotifyOnConnect(self, callback):
+    def dontNotifyOnConnect(self, callback):  # noqa: N802
         """Remove the given C{callback} from listeners."""
         self._connects.remove(callback)
 
-    def clientConnectionMade(self, protocol):
+    def clientConnectionMade(self, protocol):  # noqa: N802
         """Called when a newly built protocol gets connected."""
         if self._remote is None:
             # This is the first time we successfully connect
@@ -545,15 +586,18 @@
         # In all cases fire pending requests
         self._fire_requests(self._remote)
 
-    def clientConnectionFailed(self, connector, reason):
+    def clientConnectionFailed(self, connector, reason):  # noqa: N802
         """Try to connect again or errback pending request."""
-        ReconnectingClientFactory.clientConnectionFailed(self, connector,
-                                                         reason)
+        ReconnectingClientFactory.clientConnectionFailed(
+            self,
+            connector,
+            reason,
+        )
         if self._callID is None:
             # The factory won't retry to connect, so notify that we failed
             self._fire_requests(reason)
 
-    def buildProtocol(self, addr):
+    def buildProtocol(self, addr):  # noqa: N802
         self.resetDelay()
         protocol = ReconnectingClientFactory.buildProtocol(self, addr)
         return protocol
diff -Nru landscape-client-23.02/landscape/lib/apt/package/facade.py landscape-client-23.08/landscape/lib/apt/package/facade.py
--- landscape-client-23.02/landscape/lib/apt/package/facade.py 2023-02-07 18:55:50.000000000 +0000
+++ landscape-client-23.08/landscape/lib/apt/package/facade.py 2023-08-17 21:19:32.000000000 +0000
@@ -1,5 +1,3 @@
-from __future__ import absolute_import
-
 import hashlib
 import logging
 import os
@@ -7,23 +5,23 @@
 import sys
 import tempfile
 import time
-
 from operator import attrgetter
 
 import apt
 import apt_inst
 import apt_pkg
-
-from aptsources.sourceslist import SourcesList
-from apt.progress.text import AcquireProgress
 from apt.progress.base import InstallProgress
+from apt.progress.text import AcquireProgress
+from aptsources.sourceslist import SourcesList
 from twisted.python.compat import itervalues
-
-from landscape.lib.compat import StringIO
-from landscape.lib.fs import append_text_file, create_text_file
-from landscape.lib.fs import read_text_file, read_binary_file, touch_file
 
 from .skeleton import build_skeleton_apt
+from landscape.lib.compat import StringIO
+from landscape.lib.fs import append_text_file
+from landscape.lib.fs import create_text_file
+from landscape.lib.fs import read_binary_file
+from landscape.lib.fs import read_text_file
+from landscape.lib.fs import touch_file
 
 
 class TransactionError(Exception):
@@ -37,8 +35,11 @@
         self.packages = packages
 
     def __str__(self):
-        return ("Missing dependencies: %s" %
-                ", ".join([str(package) for package in self.packages]))
+        return "Missing dependencies: {}".format(
+            ", ".join(
+                [str(package) for package in self.packages],
+            ),
+        )
 
 
 class ChannelError(Exception):
@@ -46,7 +47,6 @@
 
 
 class LandscapeAcquireProgress(AcquireProgress):
-
     def _winch(self, *dummy):
         """Override trying to get the column count of the buffer.
 
@@ -84,13 +84,13 @@
         determine whether there were any errors. If dpkg didn't exit
         cleanly it should mean that something went wrong.
         """
-        res = super(LandscapeInstallProgress, self).wait_child()
+        res = super().wait_child()
         self.dpkg_exited = os.WIFEXITED(res)
         return res
 
     def fork(self):
         """Fork and override the excepthook in the child process."""
-        pid = super(LandscapeInstallProgress, self).fork()
+        pid = super().fork()
         if pid == 0:
             # No need to clean up after ourselves, since the child
             # process will die after dpkg has been run.
@@ -115,7 +115,7 @@
             self.old_excepthook(exc_type, exc_obj, exc_tb)
 
 
-class AptFacade(object):
+class AptFacade:
     """Wrapper for tasks using Apt.
 
     This object wraps Apt features, in a way that makes using and testing
@@ -186,14 +186,19 @@
         For Apt, it means all packages that are held.
         """
         return [
-            version for version in self.get_packages()
-            if (self.is_package_installed(version) and
-                self._is_package_held(version.package))]
+            version
+            for version in self.get_packages()
+            if (
+                self.is_package_installed(version)
+                and self._is_package_held(version.package)
+            )
+        ]
 
     def get_package_holds(self):
         """Return the name of all the packages that are on hold."""
-        return sorted([version.package.name
-                       for version in self.get_locked_packages()])
+        return sorted(
+            [version.package.name for version in self.get_locked_packages()],
+        )
 
     def _set_dpkg_selections(self, selection):
         """Set the dpkg selection.
 
@@ -202,7 +207,8 @@
         """
         process = subprocess.Popen(
             ["dpkg", "--set-selections"] + self._dpkg_args,
-            stdin=subprocess.PIPE)
+            stdin=subprocess.PIPE,
+        )
         # We need bytes here to communicate with the process.
         process.communicate(selection.encode("utf-8"))
 
@@ -218,9 +224,9 @@
 
         @param version: The version of the package to unhold.
""" - if (not self.is_package_installed(version) or - not self._is_package_held(version.package) - ): + if not self.is_package_installed(version) or not self._is_package_held( + version.package, + ): return self._set_dpkg_selections(version.package.name + " install") @@ -233,9 +239,9 @@ """ self._cache.open(None) internal_sources_list = self._get_internal_sources_list() - if (self.refetch_package_index or - (force_reload_binaries and os.path.exists(internal_sources_list)) - ): + if self.refetch_package_index or ( + force_reload_binaries and os.path.exists(internal_sources_list) + ): # Try to update only the internal repos, if the python-apt # version is new enough to accept a sources_list parameter. @@ -249,8 +255,8 @@ self._cache.update() except apt.cache.FetchFailedException: raise ChannelError( - "Apt failed to reload channels (%r)" % ( - self.get_channels())) + f"Apt failed to reload channels ({self.get_channels()!r})", + ) self._cache.open(None) self._pkg2hash.clear() @@ -259,13 +265,15 @@ if not self._is_main_architecture(package): continue for version in package.versions: - hash = self.get_package_skeleton( - version, with_info=False).get_hash() + skeleton_hash = self.get_package_skeleton( + version, + with_info=False, + ).get_hash() # Use a tuple including the package, since the Version # objects of two different packages can have the same # hash. - self._pkg2hash[(package, version)] = hash - self._hash2pkg[hash] = version + self._pkg2hash[(package, version)] = skeleton_hash + self._hash2pkg[skeleton_hash] = version self._channels_loaded = True def ensure_channels_reloaded(self): @@ -279,8 +287,13 @@ sources_dir = apt_pkg.config.find_dir("Dir::Etc::sourceparts") return os.path.join(sources_dir, "_landscape-internal-facade.list") - def add_channel_apt_deb(self, url, codename, components=None, - trusted=None): + def add_channel_apt_deb( + self, + url, + codename, + components=None, + trusted=None, + ): """Add a deb URL which points to a repository. @param url: The base URL of the repository. @@ -292,10 +305,10 @@ source_options = "" if trusted is not None and url.startswith("file:"): trusted_val = "yes" if trusted else "no" - source_options = "[ trusted={} ] ".format(trusted_val) - sources_line = "deb {}{} {}".format(source_options, url, codename) + source_options = f"[ trusted={trusted_val} ] " + sources_line = f"deb {source_options}{url} {codename}" if components: - sources_line += " %s" % " ".join(components) + sources_line += " {}".format(" ".join(components)) if os.path.exists(sources_file_path): current_content = read_text_file(sources_file_path).split("\n") if sources_line in current_content: @@ -314,7 +327,7 @@ self._create_packages_file(path) # yakkety+ validate even file repository by default. deb dirs don't # have a signed Release file but are local so they should be trusted. - self.add_channel_apt_deb("file://%s" % path, "./", None, trusted=True) + self.add_channel_apt_deb(f"file://{path}", "./", None, trusted=True) def clear_channels(self): """Clear the channels that have been added through the facade. @@ -345,9 +358,16 @@ and type keys. 
""" sources_list = SourcesList() - return [{"baseurl": entry.uri, "distribution": entry.dist, - "components": " ".join(entry.comps), "type": entry.type} - for entry in sources_list if not entry.disabled] + return [ + { + "baseurl": entry.uri, + "distribution": entry.dist, + "components": " ".join(entry.comps), + "type": entry.type, + } + for entry in sources_list + if not entry.disabled + ] def reset_channels(self): """Remove all the configured channels.""" @@ -373,17 +393,26 @@ sha1 = hashlib.sha1(contents).hexdigest() sha256 = hashlib.sha256(contents).hexdigest() tag_section = apt_pkg.TagSection(control) - new_tags = [("Filename", filename), ("Size", str(size)), - ("MD5sum", md5), ("SHA1", sha1), ("SHA256", sha256)] + new_tags = [ + ("Filename", filename), + ("Size", str(size)), + ("MD5sum", md5), + ("SHA1", sha1), + ("SHA256", sha256), + ] try: tag_section.write( dest, apt_pkg.REWRITE_PACKAGE_ORDER, - [apt_pkg.TagRewrite(k, v) for k, v in new_tags]) + [apt_pkg.TagRewrite(k, v) for k, v in new_tags], + ) except AttributeError: # support for python-apt < 1.9 section = apt_pkg.rewrite_section( - tag_section, apt_pkg.REWRITE_PACKAGE_ORDER, new_tags) + tag_section, + apt_pkg.REWRITE_PACKAGE_ORDER, + new_tags, + ) dest.write(section.encode("utf-8")) def get_arch(self): @@ -481,8 +510,10 @@ @param name: The name the returned packages should have. """ return [ - version for version in self.get_packages() - if version.package.name == name] + version + for version in self.get_packages() + if version.package.name == name + ] def _is_package_broken(self, package): """Is the package broken? @@ -497,18 +528,21 @@ """ if package.is_inst_broken: return True - if (not package.marked_install and - not package.marked_upgrade and - not package.marked_downgrade - ): + if ( + not package.marked_install + and not package.marked_upgrade + and not package.marked_downgrade + ): return package in self._package_installs return False def _get_broken_packages(self): """Return the packages that are in a broken state.""" - return set( - version.package for version in self.get_packages() - if self._is_package_broken(version.package)) + return { + version.package + for version in self.get_packages() + if self._is_package_broken(version.package) + } def _get_changed_versions(self, package): """Return the versions that will be changed for the package. @@ -538,26 +572,31 @@ # comparison checks. Same versions of different packages compare # as being the same, so we need to include the package as well. 
all_changes = [ - (version.package, version) for version in requested_changes] + (version.package, version) for version in requested_changes + ] versions_to_be_changed = set() for package in self._cache.get_changes(): if not self._is_main_architecture(package): continue versions = self._get_changed_versions(package) versions_to_be_changed.update( - (package, version) for version in versions) + (package, version) for version in versions + ) dependencies = versions_to_be_changed.difference(all_changes) if dependencies: raise DependencyError( - [version for package, version in dependencies]) + [version for package, version in dependencies], + ) return len(versions_to_be_changed) > 0 def _get_unmet_relation_info(self, dep_relation): """Return a string representation of a specific dependency relation.""" info = dep_relation.target_pkg.name if dep_relation.target_ver: - info += " (%s %s)" % ( - dep_relation.comp_type, dep_relation.target_ver) + info += " ({} {})".format( + dep_relation.comp_type, + dep_relation.target_ver, + ) reason = " but is not installable" if dep_relation.target_pkg.name in self._cache: dep_package = self._cache[dep_relation.target_pkg.name] @@ -565,7 +604,7 @@ version = dep_package.candidate.version if dep_package not in self._cache.get_changes(): version = dep_package.installed.version - reason = " but %s is to be installed" % version + reason = f" but {version} is to be installed" info += reason return info @@ -582,10 +621,10 @@ for or_dep in dependency: for target in or_dep.all_targets(): package = target.parent_pkg - if ((package.current_state == apt_pkg.CURSTATE_INSTALLED or - depcache.marked_install(package)) and - not depcache.marked_delete(package) - ): + if ( + package.current_state == apt_pkg.CURSTATE_INSTALLED + or depcache.marked_install(package) + ) and not depcache.marked_delete(package): return is_positive return not is_positive @@ -608,8 +647,9 @@ found_dependency_error = False # Fetch candidate version from our install list because # apt-2.1.5 resets broken packages candidate. - candidate = next(v._cand for v in self._version_installs - if v.package == package) + candidate = next( + v._cand for v in self._version_installs if v.package == package + ) for dep_type in ["PreDepends", "Depends", "Conflicts", "Breaks"]: dependencies = candidate.depends_list.get(dep_type, []) for dependency in dependencies: @@ -618,14 +658,19 @@ relation_infos = [] for dep_relation in dependency: relation_infos.append( - self._get_unmet_relation_info(dep_relation)) - info = " %s: %s: " % (package.name, dep_type) + self._get_unmet_relation_info(dep_relation), + ) + info = f" {package.name}: {dep_type}: " or_divider = " or\n" + " " * len(info) all_info.append(info + or_divider.join(relation_infos)) found_dependency_error = True if not found_dependency_error: all_info.append( - " %s: %s" % (package.name, "Unknown dependency error")) + " {}: {}".format( + package.name, + "Unknown dependency error", + ), + ) return "\n".join(all_info) def _set_frontend_noninteractive(self): @@ -648,19 +693,29 @@ """ Perform pending hold operations on packages. 
""" - hold_changes = (len(self._version_hold_creations) > 0 or - len(self._version_hold_removals) > 0) + hold_changes = ( + len(self._version_hold_creations) > 0 + or len(self._version_hold_removals) > 0 + ) if not hold_changes: return None - not_installed = [version for version in - self._version_hold_creations - if not self.is_package_installed(version)] + not_installed = [ + version + for version in self._version_hold_creations + if not self.is_package_installed(version) + ] if not_installed: raise TransactionError( - "Cannot perform the changes, since the following " + - "packages are not installed: %s" % ", ".join( - [version.package.name - for version in sorted(not_installed)])) + "Cannot perform the changes, since the following " + + "packages are not installed: {}".format( + ", ".join( + [ + version.package.name + for version in sorted(not_installed) + ], + ), + ), + ) for version in self._version_hold_creations: self.set_package_hold(version) @@ -701,33 +756,43 @@ time.sleep(self.dpkg_retry_sleep) logging.warning( "dpkg process might be in use. " - "Retrying package changes. %d retries remaining." - % (self.max_dpkg_retries - dpkg_tries)) + "Retrying package changes. " + f"{self.max_dpkg_retries - dpkg_tries:d} " + "retries remaining.", + ) dpkg_tries += 1 try: self._cache.commit( fetch_progress=LandscapeAcquireProgress(fetch_output), - install_progress=install_progress) + install_progress=install_progress, + ) if not install_progress.dpkg_exited: raise SystemError("dpkg didn't exit cleanly.") except SystemError as exc: - result_text = (fetch_output.getvalue() + - read_text_file(install_output_path)) - error = TransactionError(exc.args[0] + - "\n\nPackage operation log:\n" + - result_text) + result_text = fetch_output.getvalue() + read_text_file( + install_output_path, + ) + error = TransactionError( + exc.args[0] + + "\n\nPackage operation log:\n" + + result_text, + ) # No need to retry SystemError, since it's most # likely a permanent error. break except apt.cache.LockFailedException as exception: - result_text = (fetch_output.getvalue() + - read_text_file(install_output_path)) - error = TransactionError(exception.args[0] + - "\n\nPackage operation log:\n" + - result_text) + result_text = fetch_output.getvalue() + read_text_file( + install_output_path, + ) + error = TransactionError( + exception.args[0] + + "\n\nPackage operation log:\n" + + result_text, + ) else: - result_text = (fetch_output.getvalue() + - read_text_file(install_output_path)) + result_text = fetch_output.getvalue() + read_text_file( + install_output_path, + ) break if error is not None: raise error @@ -754,8 +819,9 @@ # installed packages from auto-removal, while allowing upgrades # of auto-removable packages. is_manual = ( - not version.package.installed or - not version.package.is_auto_installed) + not version.package.installed + or not version.package.is_auto_installed + ) # Set auto_fix=False to avoid removing the package we asked to # install when we need to resolve dependencies. 
@@ -767,12 +833,15 @@ def _preprocess_removes(self, fixer): held_package_names = set() - package_installs = set( - version.package for version in self._version_installs) - - package_upgrades = set( - version.package for version in self._version_removals - if version.package in package_installs) + package_installs = { + version.package for version in self._version_installs + } + + package_upgrades = { + version.package + for version in self._version_removals + if version.package in package_installs + } for version in self._version_removals: if self._is_package_held(version.package): @@ -796,8 +865,9 @@ if held_package_names: raise TransactionError( - "Can't perform the changes, since the following packages" + - " are held: %s" % ", ".join(sorted(held_package_names))) + "Can't perform the changes, since the following packages " + "are held: {}".format(", ".join(sorted(held_package_names))), + ) def _preprocess_global_upgrade(self): if self._global_upgrade: @@ -812,8 +882,9 @@ try: fixer.resolve(True) except SystemError as error: - raise TransactionError(error.args[0] + "\n" + - self._get_unmet_dependency_info()) + raise TransactionError( + error.args[0] + "\n" + self._get_unmet_dependency_info(), + ) else: now_broken_packages = self._get_broken_packages() if now_broken_packages != already_broken_packages: @@ -822,7 +893,7 @@ def _preprocess_package_changes(self): version_changes = self._version_installs[:] version_changes.extend(self._version_removals) - if (not version_changes and not self._global_upgrade): + if not version_changes and not self._global_upgrade: return [] already_broken_packages = self._get_broken_packages() fixer = apt_pkg.ProblemResolver(self._cache._depcache) diff -Nru landscape-client-23.02/landscape/lib/apt/package/skeleton.py landscape-client-23.08/landscape/lib/apt/package/skeleton.py --- landscape-client-23.02/landscape/lib/apt/package/skeleton.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/apt/package/skeleton.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,29 +1,30 @@ -from landscape.lib.compat import unicode, _PY3 -from landscape.lib.hashlib import sha1 +import apt -import apt_pkg +from landscape.lib.compat import _PY3 +from landscape.lib.compat import unicode +from landscape.lib.hashlib import sha1 -PACKAGE = 1 << 0 -PROVIDES = 1 << 1 -REQUIRES = 1 << 2 -UPGRADES = 1 << 3 +PACKAGE = 1 << 0 +PROVIDES = 1 << 1 +REQUIRES = 1 << 2 +UPGRADES = 1 << 3 CONFLICTS = 1 << 4 -DEB_PACKAGE = 1 << 16 | PACKAGE -DEB_PROVIDES = 2 << 16 | PROVIDES +DEB_PACKAGE = 1 << 16 | PACKAGE +DEB_PROVIDES = 2 << 16 | PROVIDES DEB_NAME_PROVIDES = 3 << 16 | PROVIDES -DEB_REQUIRES = 4 << 16 | REQUIRES -DEB_OR_REQUIRES = 5 << 16 | REQUIRES -DEB_UPGRADES = 6 << 16 | UPGRADES -DEB_CONFLICTS = 7 << 16 | CONFLICTS +DEB_REQUIRES = 4 << 16 | REQUIRES +DEB_OR_REQUIRES = 5 << 16 | REQUIRES +DEB_UPGRADES = 6 << 16 | UPGRADES +DEB_CONFLICTS = 7 << 16 | CONFLICTS class PackageTypeError(Exception): """Raised when an unsupported package type is passed to build_skeleton.""" -class PackageSkeleton(object): +class PackageSkeleton: section = None summary = None @@ -51,13 +52,13 @@ return self._hash # We use ascii here as encoding for backwards compatibility as it was # default encoding for conversion from unicode to bytes in Python 2.7. 
- package_info = ("[%d %s %s]" % (self.type, self.name, self.version) - ).encode("ascii") + package_info = (f"[{self.type:d} {self.name} {self.version}]").encode( + "ascii", + ) digest = sha1(package_info) self.relations.sort() for pair in self.relations: - digest.update(("[%d %s]" % (pair[0], pair[1]) - ).encode("ascii")) + digest.update((f"[{pair[0]:d} {pair[1]}]").encode("ascii")) return digest.digest() def set_hash(self, package_hash): @@ -85,30 +86,52 @@ name, version, relation_type = relation_tuple relation_string = name if relation_type: - relation_string += " %s %s" % (relation_type, version) + relation_string += f" {relation_type} {version}" return relation_string -def parse_record_field(record, record_field, relation_type, - or_relation_type=None): - """Parse an apt C{Record} field and return skeleton relations - - @param record: An C{apt.package.Record} instance with package information. - @param record_field: The name of the record field to parse. +def parse_record_dependencies( + dependencies, + relation_type, + or_relation_type=None, +): + """Parse an apt C{Dependency} list and return skeleton relations + + @param dependencies: list of dependencies returned by get_dependencies() + this function also accepts the special case for version.provides which + is a list of string @param relation_type: The deb relation that can be passed to C{skeleton.add_relation()} @param or_relation_type: The deb relation that should be used if there is more than one value in a relation. """ + + # Prepare list of dependencies relations = set() - values = apt_pkg.parse_depends(record.get(record_field, "")) - for value in values: - value_strings = [relation_to_string(relation) for relation in value] + for dependency in dependencies: + + # Process dependency + depend = [] + if isinstance(dependency, apt.package.Dependency): + for basedependency in dependency: + depend.append( + ( + basedependency.name, + basedependency.version, + basedependency.relation, + ), + ) + else: + depend.append((dependency, "", "")) + + # Process relations + value_strings = [relation_to_string(relation) for relation in depend] value_relation_type = relation_type if len(value_strings) > 1: value_relation_type = or_relation_type relation_string = " | ".join(value_strings) relations.add((value_relation_type, relation_string)) + return relations @@ -126,23 +149,45 @@ name, version_string = unicode(name), unicode(version_string) skeleton = PackageSkeleton(DEB_PACKAGE, name, version_string) relations = set() - relations.update(parse_record_field( - version.record, "Provides", DEB_PROVIDES)) - relations.add(( - DEB_NAME_PROVIDES, - "%s = %s" % (version.package.name, version.version))) - relations.update(parse_record_field( - version.record, "Pre-Depends", DEB_REQUIRES, DEB_OR_REQUIRES)) - relations.update(parse_record_field( - version.record, "Depends", DEB_REQUIRES, DEB_OR_REQUIRES)) - - relations.add(( - DEB_UPGRADES, "%s < %s" % (version.package.name, version.version))) - - relations.update(parse_record_field( - version.record, "Conflicts", DEB_CONFLICTS)) - relations.update(parse_record_field( - version.record, "Breaks", DEB_CONFLICTS)) + + relations.update(parse_record_dependencies(version.provides, DEB_PROVIDES)) + relations.add( + ( + DEB_NAME_PROVIDES, + f"{version.package.name} = {version.version}", + ), + ) + relations.update( + parse_record_dependencies( + version.get_dependencies("PreDepends"), + DEB_REQUIRES, + DEB_OR_REQUIRES, + ), + ) + relations.update( + parse_record_dependencies( + 
version.get_dependencies("Depends"), + DEB_REQUIRES, + DEB_OR_REQUIRES, + ), + ) + + relations.add( + (DEB_UPGRADES, f"{version.package.name} < {version.version}"), + ) + + relations.update( + parse_record_dependencies( + version.get_dependencies("Conflicts"), + DEB_CONFLICTS, + ), + ) + relations.update( + parse_record_dependencies( + version.get_dependencies("Breaks"), + DEB_CONFLICTS, + ), + ) skeleton.relations = sorted(relations) if with_info: diff -Nru landscape-client-23.02/landscape/lib/apt/package/store.py landscape-client-23.08/landscape/lib/apt/package/store.py --- landscape-client-23.02/landscape/lib/apt/package/store.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/apt/package/store.py 2023-08-17 21:19:32.000000000 +0000 @@ -20,7 +20,7 @@ """Raised when trying to add an invalid hash=>id lookaside database.""" -class HashIdStore(object): +class HashIdStore: """C{HashIdStore} stores package hash=>id mappings in a file. The file is a SQLite database that contains a single table called "hash". @@ -28,6 +28,7 @@ @param filename: The file where the mappings are persisted to. """ + _db = None def __init__(self, filename): @@ -43,8 +44,10 @@ @param hash_ids: a C{dict} of hash=>id mappings. """ for hash, id in iteritems(hash_ids): - cursor.execute("REPLACE INTO hash VALUES (?, ?)", - (id, sqlite3.Binary(hash))) + cursor.execute( + "REPLACE INTO hash VALUES (?, ?)", + (id, sqlite3.Binary(hash)), + ) @with_cursor def get_hash_id(self, cursor, hash): @@ -52,8 +55,10 @@ @param hash: a C{bytes} representing a hash. """ - cursor.execute("SELECT id FROM hash WHERE hash=?", - (sqlite3.Binary(hash),)) + cursor.execute( + "SELECT id FROM hash WHERE hash=?", + (sqlite3.Binary(hash),), + ) value = cursor.fetchone() if value: return value[0] @@ -107,11 +112,11 @@ """ def __init__(self, filename): - super(PackageStore, self).__init__(filename) + super().__init__(filename) self._hash_id_stores = [] def _ensure_schema(self): - super(PackageStore, self)._ensure_schema() + super()._ensure_schema() ensure_package_schema(self._db) def add_hash_id_db(self, filename): @@ -182,7 +187,7 @@ @with_cursor def remove_available(self, cursor, ids): id_list = ",".join(str(int(id)) for id in ids) - cursor.execute("DELETE FROM available WHERE id IN (%s)" % id_list) + cursor.execute(f"DELETE FROM available WHERE id IN ({id_list})") @with_cursor def clear_available(self, cursor): @@ -201,8 +206,9 @@ @with_cursor def remove_available_upgrades(self, cursor, ids): id_list = ",".join(str(int(id)) for id in ids) - cursor.execute("DELETE FROM available_upgrade WHERE id IN (%s)" - % id_list) + cursor.execute( + f"DELETE FROM available_upgrade WHERE id IN ({id_list})", + ) @with_cursor def clear_available_upgrades(self, cursor): @@ -221,7 +227,7 @@ @with_cursor def remove_autoremovable(self, cursor, ids): id_list = ",".join(str(int(id)) for id in ids) - cursor.execute("DELETE FROM autoremovable WHERE id IN (%s)" % id_list) + cursor.execute(f"DELETE FROM autoremovable WHERE id IN ({id_list})") @with_cursor def clear_autoremovable(self, cursor): @@ -240,7 +246,7 @@ @with_cursor def remove_security(self, cursor, ids): id_list = ",".join(str(int(id)) for id in ids) - cursor.execute("DELETE FROM security WHERE id IN (%s)" % id_list) + cursor.execute(f"DELETE FROM security WHERE id IN ({id_list})") @with_cursor def clear_security(self, cursor): @@ -259,7 +265,7 @@ @with_cursor def remove_installed(self, cursor, ids): id_list = ",".join(str(int(id)) for id in ids) - cursor.execute("DELETE FROM installed 
WHERE id IN (%s)" % id_list) + cursor.execute(f"DELETE FROM installed WHERE id IN ({id_list})") @with_cursor def clear_installed(self, cursor): @@ -285,7 +291,7 @@ @with_cursor def remove_locked(self, cursor, ids): id_list = ",".join(str(int(id)) for id in ids) - cursor.execute("DELETE FROM locked WHERE id IN (%s)" % id_list) + cursor.execute(f"DELETE FROM locked WHERE id IN ({id_list})") @with_cursor def clear_locked(self, cursor): @@ -295,15 +301,18 @@ @with_cursor def add_hash_id_request(self, cursor, hashes): hashes = list(hashes) - cursor.execute("INSERT INTO hash_id_request (hashes, timestamp)" - " VALUES (?,?)", - (sqlite3.Binary(bpickle.dumps(hashes)), time.time())) + cursor.execute( + "INSERT INTO hash_id_request (hashes, timestamp)" " VALUES (?,?)", + (sqlite3.Binary(bpickle.dumps(hashes)), time.time()), + ) return HashIDRequest(self._db, cursor.lastrowid) @with_cursor def get_hash_id_request(self, cursor, request_id): - cursor.execute("SELECT 1 FROM hash_id_request WHERE id=?", - (request_id,)) + cursor.execute( + "SELECT 1 FROM hash_id_request WHERE id=?", + (request_id,), + ) if not cursor.fetchone(): raise UnknownHashIDRequest(request_id) return HashIDRequest(self._db, request_id) @@ -320,24 +329,32 @@ @with_cursor def add_task(self, cursor, queue, data): data = bpickle.dumps(data) + now = time.time() cursor.execute( "INSERT INTO task (queue, timestamp, data) VALUES (?,?,?)", - (queue, time.time(), sqlite3.Binary(data))) - return PackageTask(self._db, cursor.lastrowid) + (queue, now, sqlite3.Binary(data)), + ) + return PackageTask(self._db, cursor.lastrowid, queue, now, data) @with_cursor def get_next_task(self, cursor, queue): - cursor.execute("SELECT id FROM task WHERE queue=? ORDER BY timestamp", - (queue,)) + cursor.execute( + "SELECT id, queue, timestamp, data FROM task " + "WHERE queue=? 
ORDER BY timestamp", + (queue,), + ) row = cursor.fetchone() if row: - return PackageTask(self._db, row[0]) + return PackageTask(self._db, row[0], row[1], row[2], row[3]) return None @with_cursor def clear_tasks(self, cursor, except_tasks=()): - cursor.execute("DELETE FROM task WHERE id NOT IN (%s)" % - ",".join([str(task.id) for task in except_tasks])) + cursor.execute( + "DELETE FROM task WHERE id NOT IN ({})".format( + ",".join([str(task.id) for task in except_tasks]), + ), + ) class FakePackageStore(PackageStore): @@ -346,36 +363,42 @@ """ def _ensure_schema(self): - super(FakePackageStore, self)._ensure_schema() + super()._ensure_schema() ensure_fake_package_schema(self._db) @with_cursor def save_message(self, cursor, message): - cursor.execute("INSERT INTO message (data) VALUES (?)", - (sqlite3.Binary(bpickle.dumps(message)),)) + cursor.execute( + "INSERT INTO message (data) VALUES (?)", + (sqlite3.Binary(bpickle.dumps(message)),), + ) @with_cursor def get_message_ids(self, cursor): - return [row[0] for row in - cursor.execute("SELECT id FROM message").fetchall()] + return [ + row[0] + for row in cursor.execute("SELECT id FROM message").fetchall() + ] @with_cursor def save_message_ids(self, cursor, message_ids): cursor.executemany( "INSERT INTO message (id) VALUES (?)", - [(message_id,) for message_id in message_ids]) + [(message_id,) for message_id in message_ids], + ) @with_cursor def get_messages_by_ids(self, cursor, message_ids): params = ", ".join(["?"] * len(message_ids)) result = cursor.execute( - "SELECT id, data FROM message WHERE id IN (%s) " - "ORDER BY id" % params, tuple(message_ids)).fetchall() + f"SELECT id, data FROM message WHERE id IN ({params}) " + "ORDER BY id", + tuple(message_ids), + ).fetchall() return [(row[0], bytes(row[1])) for row in result] -class HashIDRequest(object): - +class HashIDRequest: def __init__(self, db, id): self._db = db self.id = id @@ -383,33 +406,43 @@ @property @with_cursor def hashes(self, cursor): - cursor.execute("SELECT hashes FROM hash_id_request WHERE id=?", - (self.id,)) + cursor.execute( + "SELECT hashes FROM hash_id_request WHERE id=?", + (self.id,), + ) return bpickle.loads(bytes(cursor.fetchone()[0])) @with_cursor def _get_timestamp(self, cursor): - cursor.execute("SELECT timestamp FROM hash_id_request WHERE id=?", - (self.id,)) + cursor.execute( + "SELECT timestamp FROM hash_id_request WHERE id=?", + (self.id,), + ) return cursor.fetchone()[0] @with_cursor def _set_timestamp(self, cursor, value): - cursor.execute("UPDATE hash_id_request SET timestamp=? WHERE id=?", - (value, self.id)) + cursor.execute( + "UPDATE hash_id_request SET timestamp=? WHERE id=?", + (value, self.id), + ) timestamp = property(_get_timestamp, _set_timestamp) @with_cursor def _get_message_id(self, cursor): - cursor.execute("SELECT message_id FROM hash_id_request WHERE id=?", - (self.id,)) + cursor.execute( + "SELECT message_id FROM hash_id_request WHERE id=?", + (self.id,), + ) return cursor.fetchone()[0] @with_cursor def _set_message_id(self, cursor, value): - cursor.execute("UPDATE hash_id_request SET message_id=? WHERE id=?", - (value, self.id)) + cursor.execute( + "UPDATE hash_id_request SET message_id=? 
WHERE id=?", + (value, self.id), + ) message_id = property(_get_message_id, _set_message_id) @@ -418,23 +451,13 @@ cursor.execute("DELETE FROM hash_id_request WHERE id=?", (self.id,)) -class PackageTask(object): - - def __init__(self, db, id): +class PackageTask: + def __init__(self, db, cid, queue, timestamp, data): self._db = db - self.id = id - - cursor = db.cursor() - try: - cursor.execute("SELECT queue, timestamp, data FROM task " - "WHERE id=?", (id,)) - row = cursor.fetchone() - finally: - cursor.close() - - self.queue = row[0] - self.timestamp = row[1] - self.data = bpickle.loads(bytes(row[2])) + self.id = cid + self.queue = queue + self.timestamp = timestamp + self.data = bpickle.loads(data) @with_cursor def remove(self, cursor): @@ -448,8 +471,9 @@ """ cursor = db.cursor() try: - cursor.execute("CREATE TABLE hash" - " (id INTEGER PRIMARY KEY, hash BLOB UNIQUE)") + cursor.execute( + "CREATE TABLE hash" " (id INTEGER PRIMARY KEY, hash BLOB UNIQUE)", + ) except (sqlite3.OperationalError, sqlite3.DatabaseError): cursor.close() db.rollback() @@ -469,24 +493,26 @@ # try block. cursor = db.cursor() try: - cursor.execute("CREATE TABLE security" - " (id INTEGER PRIMARY KEY)") - cursor.execute("CREATE TABLE autoremovable" - " (id INTEGER PRIMARY KEY)") - cursor.execute("CREATE TABLE locked" - " (id INTEGER PRIMARY KEY)") - cursor.execute("CREATE TABLE available" - " (id INTEGER PRIMARY KEY)") - cursor.execute("CREATE TABLE available_upgrade" - " (id INTEGER PRIMARY KEY)") - cursor.execute("CREATE TABLE installed" - " (id INTEGER PRIMARY KEY)") - cursor.execute("CREATE TABLE hash_id_request" - " (id INTEGER PRIMARY KEY, timestamp TIMESTAMP," - " message_id INTEGER, hashes BLOB)") - cursor.execute("CREATE TABLE task" - " (id INTEGER PRIMARY KEY, queue TEXT," - " timestamp TIMESTAMP, data BLOB)") + cursor.execute("CREATE TABLE security" " (id INTEGER PRIMARY KEY)") + cursor.execute( + "CREATE TABLE autoremovable" " (id INTEGER PRIMARY KEY)", + ) + cursor.execute("CREATE TABLE locked" " (id INTEGER PRIMARY KEY)") + cursor.execute("CREATE TABLE available" " (id INTEGER PRIMARY KEY)") + cursor.execute( + "CREATE TABLE available_upgrade" " (id INTEGER PRIMARY KEY)", + ) + cursor.execute("CREATE TABLE installed" " (id INTEGER PRIMARY KEY)") + cursor.execute( + "CREATE TABLE hash_id_request" + " (id INTEGER PRIMARY KEY, timestamp TIMESTAMP," + " message_id INTEGER, hashes BLOB)", + ) + cursor.execute( + "CREATE TABLE task" + " (id INTEGER PRIMARY KEY, queue TEXT," + " timestamp TIMESTAMP, data BLOB)", + ) except sqlite3.OperationalError: cursor.close() db.rollback() @@ -498,8 +524,9 @@ def ensure_fake_package_schema(db): cursor = db.cursor() try: - cursor.execute("CREATE TABLE message" - " (id INTEGER PRIMARY KEY, data BLOB)") + cursor.execute( + "CREATE TABLE message" " (id INTEGER PRIMARY KEY, data BLOB)", + ) except (sqlite3.OperationalError, sqlite3.DatabaseError): cursor.close() db.rollback() diff -Nru landscape-client-23.02/landscape/lib/apt/package/testing.py landscape-client-23.08/landscape/lib/apt/package/testing.py --- landscape-client-23.02/landscape/lib/apt/package/testing.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/apt/package/testing.py 2023-08-17 21:19:32.000000000 +0000 @@ -4,19 +4,24 @@ import apt_inst import apt_pkg +from landscape.lib import base64 from landscape.lib.apt.package.facade import AptFacade from landscape.lib.fs import append_binary_file from landscape.lib.fs import create_binary_file -from landscape.lib import base64 -class 
AptFacadeHelper(object): +class AptFacadeHelper: """Helper that sets up an AptFacade with a tempdir as its root.""" def set_up(self, test_case): test_case.apt_root = test_case.makeDir() self.dpkg_status = os.path.join( - test_case.apt_root, "var", "lib", "dpkg", "status") + test_case.apt_root, + "var", + "lib", + "dpkg", + "status", + ) test_case.Facade = AptFacade test_case.facade = AptFacade(root=test_case.apt_root) test_case.facade.refetch_package_index = True @@ -29,9 +34,15 @@ test_case._touch_packages_file = self._touch_packages_file test_case._hash_packages_by_name = self._hash_packages_by_name - def _add_package(self, packages_file, name, architecture="all", - version="1.0", description="description", - control_fields=None): + def _add_package( + self, + packages_file, + name, + architecture="all", + version="1.0", + description="description", + control_fields=None, + ): if control_fields is None: control_fields = {} package_stanza = { @@ -43,22 +54,25 @@ "Architecture": architecture, "Source": "source", "Version": version, - "Description": "short description\n " + description} + "Description": "short description\n " + description, + } package_stanza.update(control_fields) try: with open(packages_file, "rb") as src: packages = src.read().split(b"\n\n") - except IOError: + except OSError: packages = [] if b"" in packages: packages.remove(b"") - new_package = u"\n".join([ - u"{}: {}".format(key, package_stanza[key]) - for key in apt_pkg.REWRITE_PACKAGE_ORDER - if key in package_stanza - ]).encode("utf-8") + new_package = "\n".join( + [ + "{}: {}".format(key, package_stanza[key]) + for key in apt_pkg.REWRITE_PACKAGE_ORDER + if key in package_stanza + ], + ).encode("utf-8") packages.append(new_package) with open(packages_file, "wb", 0) as dest: @@ -66,15 +80,24 @@ dest.write(b"\n\n".join(sorted(packages))) dest.write(b"\n") - def _add_system_package(self, name, architecture="all", version="1.0", - control_fields=None): + def _add_system_package( + self, + name, + architecture="all", + version="1.0", + control_fields=None, + ): """Add a package to the dpkg status file.""" system_control_fields = {"Status": "install ok installed"} if control_fields is not None: system_control_fields.update(control_fields) self._add_package( - self.dpkg_status, name, architecture=architecture, version=version, - control_fields=system_control_fields) + self.dpkg_status, + name, + architecture=architecture, + version=version, + control_fields=system_control_fields, + ) def _install_deb_file(self, path): """Fake the the given deb file is installed in the system.""" @@ -87,9 +110,15 @@ status = b"\n".join(lines) append_binary_file(self.dpkg_status, status + b"\n\n") - def _add_package_to_deb_dir(self, path, name, architecture="all", - version="1.0", description="description", - control_fields=None): + def _add_package_to_deb_dir( + self, + path, + name, + architecture="all", + version="1.0", + description="description", + control_fields=None, + ): """Add fake package information to a directory. 
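# NOTE: two details from the hunk above. First, the switch from
# `except IOError` to `except OSError` is behavior-neutral on Python 3,
# where IOError is an alias of OSError (`IOError is OSError` is True).
# Second, the stanza is serialized in the canonical field order that
# python-apt publishes as apt_pkg.REWRITE_PACKAGE_ORDER; a standalone
# sketch of that idiom (requires python-apt):
import apt_pkg


def format_stanza(fields: dict) -> bytes:
    lines = [
        f"{key}: {fields[key]}"
        for key in apt_pkg.REWRITE_PACKAGE_ORDER
        if key in fields
    ]
    return "\n".join(lines).encode("utf-8")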
There will only be basic information about the package @@ -99,9 +128,13 @@ if control_fields is None: control_fields = {} self._add_package( - os.path.join(path, "Packages"), name, architecture=architecture, - version=version, description=description, - control_fields=control_fields) + os.path.join(path, "Packages"), + name, + architecture=architecture, + version=version, + description=description, + control_fields=control_fields, + ) def _touch_packages_file(self, deb_dir): """Make sure the Packages file gets a newer mtime value. @@ -121,15 +154,14 @@ """ hash_ids = {} for version in facade.get_packages_by_name(package_name): - skeleton = facade.get_package_skeleton( - version, with_info=False) + skeleton = facade.get_package_skeleton(version, with_info=False) hash = skeleton.get_hash() facade._pkg2hash[(version.package, version)] = hash hash_ids[hash] = version.package.id store.set_hash_ids(hash_ids) -class SimpleRepositoryHelper(object): +class SimpleRepositoryHelper: """Helper for adding a simple repository to the facade. This helper requires that C{test_case.facade} has already been set @@ -151,175 +183,193 @@ PKGNAME_VERSION_RELATIONS = "version-relations_1.0_all.deb" PKGNAME_MULTIPLE_RELATIONS = "multiple-relations_1.0_all.deb" PKGNAME_OR_RELATIONS = "or-relations_1.0_all.deb" +PKGNAME_BROKEN_DESCRIPTION = "brokendescription_1.0_all.deb" -PKGDEB1 = ("ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTE2NjExNDQ5MyAgMCAgICAgMCAgICAgMT" - "AwNjQ0ICA0ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDExNjYxMTQ0OTMg" - "IDAgICAgIDAgICAgIDEwMDY0NCAgNDUyICAgICAgIGAKH4sIAAAAAAAAA+3UQW+bMB" - "QHcM58Ch+7QwCbEJpomzat0rTDpmiRenfNC7EGNnuGSOmnnwMlyVK1O6VT1feTkJ+e" - "/wRh40RxcHGJl2dZP3rnY1/zJJsmIs9Fvs/5UQQsC15A51qJjAVobftc7l/zr1QU10" - "XmutpdeP9n0+mT+8+5+Hv/fSOdBiyh/b84MYM1n2fz7G4t0+u5SvMkhbTgs3wu+CwB" - "xjqHsdtIhLiwKjayBh6rjTQlVLaMbuBOSxOV92FAXuX5V9a0aKv/eP5zkZyf/1TQ+X" - "8RS6l+yRIWrD/Y4S2g09Ys2HYo+AShAun81ApU2099Rds1PFyitqjb3YLZZj8hq/Az" - "qo1ufa5D/4uyqnwIJjfQgCncgjUICL87jdA/jF19OGmND3wXHvLn4UfJn6BsXY/hsT" - "7Jj63jLauuLMG1/gb3UB3iY+MY/mLNutJqn1ZjeYgfOsf8Eu1WF9C/6lANq/rN+I+s" - "qqCYrPS9XxlxHX6X2rT+AvQLuv8Gt5b90FDDDpC9L4fOJ/PQiQy0H/3COIW6GXZh1d" - "W1xB0P2Umb078wIYQQQgghhBBCCCGEEEIIIYS8UX8AYydx2gAoAABkYXRhLnRhci5n" - "eiAgICAgMTE2NjExNDQ5MyAgMCAgICAgMCAgICAgMTAwNjQ0ICAzOTQgICAgICAgYA" - "ofiwgAAAAAAAAD09NnoDkwAAJzU1MwDQToNJhtaGBqYmBkbm5kDlIHpI0YFEwZ6ABK" - "i0sSixQUGIry80vwqSMkP0SBnn5pcZH+YIp/EwYDIMd4NP7pGP/FGYlFqfqDJ/4NzY" - "xNRuOf3vGfkp+sPzji38jEwHA0/gci/vMSc1MN9Qc6/o2B7NH4H7j4T85IzEtPzclP" - "13NJTcpMzNNLr6Iw/s1MTHDGv5GxOSz+zUxNjYDxbw7kMSgYjMY/zYF8NwdHVm2jKx" - "Mzepwz6J7y5jpkIOH6sDKssF1rmUqYzBX2piZj9zyFad5RHv8dLoXsqua2spF3v+PQ" - "ffXIlN8aYepsu3x2u0202VX+QFC10st6vvMfDdacgtdzKtpe5G5tuFYx5elcpXm27O" - "d8LH7Oj3mqP7VgD8P6dTmJ33dsPnpuBnPO3SvLDNlu6ay9It6yZon0BIZRMApGwSgY" - "BaNgFIyCUTAKRsEoGAWjYBSMglEwCkbBKBgFo2AUjIJRMApGAUkAADhX8vgAKAAA " - ).encode("ascii") - -PKGDEB2 = ("ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTE2NjExNDUyMiAgMCAgICAgMCAgICAgMT" - "AwNjQ0ICA0ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDExNjYxMTQ1MjIg" - "IDAgICAgIDAgICAgIDEwMDY0NCAgNDUyICAgICAgIGAKH4sIAAAAAAAAA+3UTY/TMB" - "AG4JzzK3yEQ/Phxk1aAQKxEuIAqrYSd+NMU4vEDuOkUvfX4yabthQBpy5aMY9UZTR+" - "06ieuFEc3Fzi5UIMV+/6OtRpIrKE5/l8zn0/z9MsYCJ4Ar3rJDIWoLXdn3J/W3+mor" - "gphesbd+P5L7Lst/NPU/7z/H2DLwKW0PxvrixSlYkiAVGIxZJnaSHFdilUDplabnnG" - "WO8wdjuJEJdWxUY2wGO1k6aC2lbRHXzV0kTVQxiQZ3n+lTUd2vofnv+cJ9fnf57S+X" - "8Sa6m+yQpWbDjY4RdAp61Zsf1Y8BlCDdL5pQ2oblj6gLZvebhGbVF3hxWz7XFB1uE7" - "VDvd+VyP/htlXfsQzO6gBVO6FWsREL73GmF4GHvx+qI1PfBleMpfh39J3oOyTTOFp/" - 
"oiP7XOt2z6qgLX+RvcY3WKT41z+L0121qrY1pN5Sl+6pzza7R7XcLwU8dq3NWPxr9k" - "dQ3lbKMf/M7wIvwkten8B9Bv6PEd3Fv2WUMDB0D2qho7b81jJzLQvfEb4xTqdpzCpm" - "8aiQcesos2p39hQgghhBBCCCGEEEIIIYQQQgj5T/0AyM2cyQAoAABkYXRhLnRhci5n" - "eiAgICAgMTE2NjExNDUyMiAgMCAgICAgMCAgICAgMTAwNjQ0ICAzOTMgICAgICAgYA" - "ofiwgAAAAAAAAD09NnoDkwAAJzU1MwDQToNJhtaGBqYmBkbm5sbAgUNzc3NGZQMGWg" - "AygtLkksUlBgKMrPL8GnjpD8EAV6+qXFRfqDLP6BHCOT0finX/wXZyQWpeoPnvg3ND" - "MyG41/esd/Sn6y/uCIfyNj89Hyf0DiPy8xN9VIf6Dj39jY3HQ0/gcu/pMzEvPSU3Py" - "0/VcUpMyE/P00qsojH8zExOc8Q/M7Yj4Bxb8BobmBsDkomAwGv80B/LdHBzX6hpdmZ" - "jR45xB99RGrkMGEq4Pbf0L3UWDL4XIRIk6Hjx7Urzj6SSxS/YTzKbu28sqe/64oPmF" - "JGPj3lqR1cLMdz12u04rLHp/gM2y0mv3HOc/GqxvCl7PqWh7kbux6VrFk69zlefZsu" - "v5WPycH/NUv7VgF8N6vfeBcgXp3NlnBFNDw5eZsd1as/aK+JzyvZ0TGEbBKBgFo2AU" - "jIJRMApGwSgYBaNgFIyCUTAKRsEoGAWjYBSMglEwCkbBKBgFJAEAu4OlKQAoAAAK" - ).encode("ascii") - -PKGDEB3 = ("ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTE2OTE0ODIwMyAgMCAgICAgMCAgICAgMT" - "AwNjQ0ICA0ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDExNjkxNDgyMDMg" - "IDAgICAgIDAgICAgIDEwMDY0NCAgNDUxICAgICAgIGAKH4sIAAAAAAAAA+3UwY7TMB" - "AG4JzzFD7CoUkax7iqYAViJcQBVFGJu3GmqbWJHcZJpe7T4yabtnS1cOqiFfNJVUbj" - "P43qiZuk0dVlgRRiuAaX16GeZ0JwWRSF4KEvZc4jJqJn0PtOIWMROtf9Kfe39RcqSZ" - "tS+L7xV57/m6J4cv7zef77/EODi4hlNP+r4yIrc1mUUs43C1VmhcxLEAKkFouCbzRj" - "vcfUbxVCWjqdWtUAT/VW2QpqVyW38MMom1T3cURe5PnXznbo6n94/mWeXZ5/ntP5fx" - "Yrpe9UBUs2HOz4O6A3zi7Zbiz4DKEG5cPSGnQ3LH1C17c8XqFxaLr9krn2sKDq+APq" - "relCrsfwjaquQwhmt9CCLf2StQgIP3uDMDyMvXp31poe+Do+5i/Dj5LfQLummcJTfZ" - "afWqdb1n1Vge/CDf6hOsanxin80dlNbfQhrafyGD92TvkVup0pYfipYzXu6mcbXrK6" - "hnK2NvdhZ/JF/EUZ24UPYNjQwzu4c+yrgQb2gOxtNXbe24dOYqG7CRvjNZp2nMK6bx" - "qFex6zszanf2FCCCGEEEIIIYQQQgghhBBCCPlP/QK+dA1dACgAAApkYXRhLnRhci5n" - "eiAgICAgMTE2OTE0ODIwMyAgMCAgICAgMCAgICAgMTAwNjQ0ICAzOTkgICAgICAgYA" - "ofiwgAAAAAAAAD09NnoDkwAAJzU1MwDQToNJhtaGBqamxuYmJiagQUNzc3MmJQMGWg" - "AygtLkksUlBgKMrPL8GnjpD8EAV6+qXFRfqDKf4NGQyAHOPR+Kdj/BdnJBal6g+e+D" - "c0MzYZjX96x39KfrL+4Ih/IxMDw9H4H4j4z0vMTTXWH8j4B9b/hsYmBqaj8T9w8Z+c" - "kZiXnpqTn67nkpqUmZinl15FYfybmZjgjH8jY3NE/JuYAePfHKieQcFgNP5pDuS7OT" - "jUTq53ZWJGj3MG3VPeXIcMJFwfVoYVtmstW+Imc4W9qcnYPU9hmneUx3+HSyG7qrmt" - "bOTd7zh0Xz0y5bdGmDrbLp/dbhNtdpU/EFSt9LKe7/xHgzWn4PWcirYXuVsbrlVMeT" - "pXaZ4t+zkfi5/zY57qTy3Yw7B+XU7g+8L07rmG7Fe2bVxmyHZLZ+0V8Sl2Xj8mMIyC" - "UTAKRsEoGAWjYBSMglEwCkbBKBgFo2AUjIJRMApGwSgYBaNgFIyCUTAKSAIAY/FOKA" - "AoAAAK" - ).encode("ascii") - -PKGDEB4 = ("ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTI3NjUxMTU3OC41MCAgICAgMCAgICAgNj" - "Q0ICAgICA0\nICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEyNzY1MTE1Nz" - "guNTAgICAgIDAgICAgIDY0\nNCAgICAgMjk1ICAgICAgIGAKH4sIAFoFFkwC/+3TwU" - "6EMBAGYM48RV9goS0dqnszMSbeTEy8F6iE\nCJS04MGnt2GzBzHqiVWT/7u0yVCm8G" - "eyPNkdjzTRukbbdd0LoTgpLqmQCRdCckoYJRewhNn4eBXv\n3Pzdcz/Vtx/3T2R57c" - "bZu37n/EulvsxfqnKTvyyFTBhH/rt7MPWLae2RjWawIn2yPnRuPLLX00Zk\n4uBtb0" - "2Ixfsx/qu+t83hsXuLRwRPb22ofTfN65kbFsww9ZYtU+tNY9l0ennK7pxnsw1zN7bn" - "YsjS\nD72LT72Lc2eVJrDb/A8NhWUIvzj/nMR2/kkKzP8lNERFJZWOGWiqiF89ayVt" - "qbWhSlfimrEsD26w\nGEEAAAAAAAAAAAAAAAAAAIC/6x1piYqhACgAAApkYXRhLnRh" - "ci5neiAgICAgMTI3NjUxMTU3OC41\nMCAgICAgMCAgICAgNjQ0ICAgICAxNDUgICAg" - "ICAgYAofiwgAWgUWTAL/7dFBCsMgEEDRWfcUniCZ\nsU57kJ5ASJdFSOz9K9kULLQr" - "C4H/NiPqQvnTLMNpc3XfZ9PPfW2W1JOae9s3i5okuPzBc6t5bU9Z\nS6nf7v067z93" - "ENO8lcd9fP/LZ/d3f4td/6h+lqD0H+7W6ocl13wSAAAAAAAAAAAAAAAAAAfzAqr5\n" - "GFYAKAAACg==\n" - ).encode("ascii") +PKGDEB1 = ( + b"ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTE2NjExNDQ5MyAgMCAgICAgMCAgICAgMT" + b"AwNjQ0ICA0ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDExNjYxMTQ0OTMg" + 
b"IDAgICAgIDAgICAgIDEwMDY0NCAgNDUyICAgICAgIGAKH4sIAAAAAAAAA+3UQW+bMB" + b"QHcM58Ch+7QwCbEJpomzat0rTDpmiRenfNC7EGNnuGSOmnnwMlyVK1O6VT1feTkJ+e" + b"/wRh40RxcHGJl2dZP3rnY1/zJJsmIs9Fvs/5UQQsC15A51qJjAVobftc7l/zr1QU10" + b"XmutpdeP9n0+mT+8+5+Hv/fSOdBiyh/b84MYM1n2fz7G4t0+u5SvMkhbTgs3wu+CwB" + b"xjqHsdtIhLiwKjayBh6rjTQlVLaMbuBOSxOV92FAXuX5V9a0aKv/eP5zkZyf/1TQ+X" + b"8RS6l+yRIWrD/Y4S2g09Ys2HYo+AShAun81ApU2099Rds1PFyitqjb3YLZZj8hq/Az" + b"qo1ufa5D/4uyqnwIJjfQgCncgjUICL87jdA/jF19OGmND3wXHvLn4UfJn6BsXY/hsT" + b"7Jj63jLauuLMG1/gb3UB3iY+MY/mLNutJqn1ZjeYgfOsf8Eu1WF9C/6lANq/rN+I+s" + b"qqCYrPS9XxlxHX6X2rT+AvQLuv8Gt5b90FDDDpC9L4fOJ/PQiQy0H/3COIW6GXZh1d" + b"W1xB0P2Umb078wIYQQQgghhBBCCCGEEEIIIYS8UX8AYydx2gAoAABkYXRhLnRhci5n" + b"eiAgICAgMTE2NjExNDQ5MyAgMCAgICAgMCAgICAgMTAwNjQ0ICAzOTQgICAgICAgYA" + b"ofiwgAAAAAAAAD09NnoDkwAAJzU1MwDQToNJhtaGBqYmBkbm5kDlIHpI0YFEwZ6ABK" + b"i0sSixQUGIry80vwqSMkP0SBnn5pcZH+YIp/EwYDIMd4NP7pGP/FGYlFqfqDJ/4NzY" + b"xNRuOf3vGfkp+sPzji38jEwHA0/gci/vMSc1MN9Qc6/o2B7NH4H7j4T85IzEtPzclP" + b"13NJTcpMzNNLr6Iw/s1MTHDGv5GxOSz+zUxNjYDxbw7kMSgYjMY/zYF8NwdHVm2jKx" + b"Mzepwz6J7y5jpkIOH6sDKssF1rmUqYzBX2piZj9zyFad5RHv8dLoXsqua2spF3v+PQ" + b"ffXIlN8aYepsu3x2u0202VX+QFC10st6vvMfDdacgtdzKtpe5G5tuFYx5elcpXm27O" + b"d8LH7Oj3mqP7VgD8P6dTmJ33dsPnpuBnPO3SvLDNlu6ay9It6yZon0BIZRMApGwSgY" + b"BaNgFIyCUTAKRsEoGAWjYBSMglEwCkbBKBgFo2AUjIJRMApGAUkAADhX8vgAKAAA " +) + +PKGDEB2 = ( + b"ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTE2NjExNDUyMiAgMCAgICAgMCAgICAgMT" + b"AwNjQ0ICA0ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDExNjYxMTQ1MjIg" + b"IDAgICAgIDAgICAgIDEwMDY0NCAgNDUyICAgICAgIGAKH4sIAAAAAAAAA+3UTY/TMB" + b"AG4JzzK3yEQ/Phxk1aAQKxEuIAqrYSd+NMU4vEDuOkUvfX4yabthQBpy5aMY9UZTR+" + b"06ieuFEc3Fzi5UIMV+/6OtRpIrKE5/l8zn0/z9MsYCJ4Ar3rJDIWoLXdn3J/W3+mor" + b"gphesbd+P5L7Lst/NPU/7z/H2DLwKW0PxvrixSlYkiAVGIxZJnaSHFdilUDplabnnG" + b"WO8wdjuJEJdWxUY2wGO1k6aC2lbRHXzV0kTVQxiQZ3n+lTUd2vofnv+cJ9fnf57S+X" + b"8Sa6m+yQpWbDjY4RdAp61Zsf1Y8BlCDdL5pQ2oblj6gLZvebhGbVF3hxWz7XFB1uE7" + b"VDvd+VyP/htlXfsQzO6gBVO6FWsREL73GmF4GHvx+qI1PfBleMpfh39J3oOyTTOFp/" + b"oiP7XOt2z6qgLX+RvcY3WKT41z+L0121qrY1pN5Sl+6pzza7R7XcLwU8dq3NWPxr9k" + b"dQ3lbKMf/M7wIvwkten8B9Bv6PEd3Fv2WUMDB0D2qho7b81jJzLQvfEb4xTqdpzCpm" + b"8aiQcesos2p39hQgghhBBCCCGEEEIIIYQQQgj5T/0AyM2cyQAoAABkYXRhLnRhci5n" + b"eiAgICAgMTE2NjExNDUyMiAgMCAgICAgMCAgICAgMTAwNjQ0ICAzOTMgICAgICAgYA" + b"ofiwgAAAAAAAAD09NnoDkwAAJzU1MwDQToNJhtaGBqYmBkbm5sbAgUNzc3NGZQMGWg" + b"AygtLkksUlBgKMrPL8GnjpD8EAV6+qXFRfqDLP6BHCOT0finX/wXZyQWpeoPnvg3ND" + b"MyG41/esd/Sn6y/uCIfyNj89Hyf0DiPy8xN9VIf6Dj39jY3HQ0/gcu/pMzEvPSU3Py" + b"0/VcUpMyE/P00qsojH8zExOc8Q/M7Yj4Bxb8BobmBsDkomAwGv80B/LdHBzX6hpdmZ" + b"jR45xB99RGrkMGEq4Pbf0L3UWDL4XIRIk6Hjx7Urzj6SSxS/YTzKbu28sqe/64oPmF" + b"JGPj3lqR1cLMdz12u04rLHp/gM2y0mv3HOc/GqxvCl7PqWh7kbux6VrFk69zlefZsu" + b"v5WPycH/NUv7VgF8N6vfeBcgXp3NlnBFNDw5eZsd1as/aK+JzyvZ0TGEbBKBgFo2AU" + b"jIJRMApGwSgYBaNgFIyCUTAKRsEoGAWjYBSMglEwCkbBKBgFJAEAu4OlKQAoAAAK" +) + +PKGDEB3 = ( + b"ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTE2OTE0ODIwMyAgMCAgICAgMCAgICAgMT" + b"AwNjQ0ICA0ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDExNjkxNDgyMDMg" + b"IDAgICAgIDAgICAgIDEwMDY0NCAgNDUxICAgICAgIGAKH4sIAAAAAAAAA+3UwY7TMB" + b"AG4JzzFD7CoUkax7iqYAViJcQBVFGJu3GmqbWJHcZJpe7T4yabtnS1cOqiFfNJVUbj" + b"P43qiZuk0dVlgRRiuAaX16GeZ0JwWRSF4KEvZc4jJqJn0PtOIWMROtf9Kfe39RcqSZ" + b"tS+L7xV57/m6J4cv7zef77/EODi4hlNP+r4yIrc1mUUs43C1VmhcxLEAKkFouCbzRj" + b"vcfUbxVCWjqdWtUAT/VW2QpqVyW38MMom1T3cURe5PnXznbo6n94/mWeXZ5/ntP5fx" + b"Yrpe9UBUs2HOz4O6A3zi7Zbiz4DKEG5cPSGnQ3LH1C17c8XqFxaLr9krn2sKDq+APq" + 
b"relCrsfwjaquQwhmt9CCLf2StQgIP3uDMDyMvXp31poe+Do+5i/Dj5LfQLummcJTfZ" + b"afWqdb1n1Vge/CDf6hOsanxin80dlNbfQhrafyGD92TvkVup0pYfipYzXu6mcbXrK6" + b"hnK2NvdhZ/JF/EUZ24UPYNjQwzu4c+yrgQb2gOxtNXbe24dOYqG7CRvjNZp2nMK6bx" + b"qFex6zszanf2FCCCGEEEIIIYQQQgghhBBCCPlP/QK+dA1dACgAAApkYXRhLnRhci5n" + b"eiAgICAgMTE2OTE0ODIwMyAgMCAgICAgMCAgICAgMTAwNjQ0ICAzOTkgICAgICAgYA" + b"ofiwgAAAAAAAAD09NnoDkwAAJzU1MwDQToNJhtaGBqamxuYmJiagQUNzc3MmJQMGWg" + b"AygtLkksUlBgKMrPL8GnjpD8EAV6+qXFRfqDKf4NGQyAHOPR+Kdj/BdnJBal6g+e+D" + b"c0MzYZjX96x39KfrL+4Ih/IxMDw9H4H4j4z0vMTTXWH8j4B9b/hsYmBqaj8T9w8Z+c" + b"kZiXnpqTn67nkpqUmZinl15FYfybmZjgjH8jY3NE/JuYAePfHKieQcFgNP5pDuS7OT" + b"jUTq53ZWJGj3MG3VPeXIcMJFwfVoYVtmstW+Imc4W9qcnYPU9hmneUx3+HSyG7qrmt" + b"bOTd7zh0Xz0y5bdGmDrbLp/dbhNtdpU/EFSt9LKe7/xHgzWn4PWcirYXuVsbrlVMeT" + b"pXaZ4t+zkfi5/zY57qTy3Yw7B+XU7g+8L07rmG7Fe2bVxmyHZLZ+0V8Sl2Xj8mMIyC" + b"UTAKRsEoGAWjYBSMglEwCkbBKBgFo2AUjIJRMApGwSgYBaNgFIyCUTAKSAIAY/FOKA" + b"AoAAAK" +) + +PKGDEB4 = ( + b"ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTI3NjUxMTU3OC41MCAgICAgMCAgICAgNj" + b"Q0ICAgICA0\nICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEyNzY1MTE1Nz" + b"guNTAgICAgIDAgICAgIDY0\nNCAgICAgMjk1ICAgICAgIGAKH4sIAFoFFkwC/+3TwU" + b"6EMBAGYM48RV9goS0dqnszMSbeTEy8F6iE\nCJS04MGnt2GzBzHqiVWT/7u0yVCm8G" + b"eyPNkdjzTRukbbdd0LoTgpLqmQCRdCckoYJRewhNn4eBXv\n3Pzdcz/Vtx/3T2R57c" + b"bZu37n/EulvsxfqnKTvyyFTBhH/rt7MPWLae2RjWawIn2yPnRuPLLX00Zk\n4uBtb0" + b"2Ixfsx/qu+t83hsXuLRwRPb22ofTfN65kbFsww9ZYtU+tNY9l0ennK7pxnsw1zN7bn" + b"YsjS\nD72LT72Lc2eVJrDb/A8NhWUIvzj/nMR2/kkKzP8lNERFJZWOGWiqiF89ayVt" + b"qbWhSlfimrEsD26w\nGEEAAAAAAAAAAAAAAAAAAIC/6x1piYqhACgAAApkYXRhLnRh" + b"ci5neiAgICAgMTI3NjUxMTU3OC41\nMCAgICAgMCAgICAgNjQ0ICAgICAxNDUgICAg" + b"ICAgYAofiwgAWgUWTAL/7dFBCsMgEEDRWfcUniCZ\nsU57kJ5ASJdFSOz9K9kULLQr" + b"C4H/NiPqQvnTLMNpc3XfZ9PPfW2W1JOae9s3i5okuPzBc6t5bU9Z\nS6nf7v067z93" + b"ENO8lcd9fP/LZ/d3f4td/6h+lqD0H+7W6ocl13wSAAAAAAAAAAAAAAAAAAfzAqr5\n" + b"GFYAKAAACg==\n" +) PKGDEB_MINIMAL = ( - "ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxNzg5MDQ3OSAgMCAgICAgMCAgICAgMTAwNj" - "Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTc4OTA0NzkgIDAgICA" - "gIDAgICAgIDEw MDY0NCAgMjU4ICAgICAgIGAKH4sIAAAAAAACA+3Rz0rEMBAG8Jz7FPME" - "3aT/FoqIC54EwZP3mB1s 1jQp0yz6+HaVBRFcTxWE7wfJQDKZHL5yo1anF9u2PVWzbfXXe" - "qaM6Zq66pqurZQ2uqorRa36A8c5 WyFST4ck8ULfb/f/VLlxKWZJYeX8u6b5Mf+qbr7lb7" - "rliDTyX92DdS/2mXsaffSjDcUjy+xT7MmU utiJG3xml4+ytNgQinvrY14WS093aYh0dVj" - "2G36z4xS4dGm8Lm55duKn/DFmd55M0+dX9OrzQDHR nieOe47O80xJKOWBhYSDPb2cy0IB" - "AAAAAAAAAAAAAAAAAAAAAMBF70s1/foAKAAAZGF0YS50YXIu Z3ogICAgIDEzMTc4OTA0N" - "zkgIDAgICAgIDAgICAgIDEwMDY0NCAgMTA3ICAgICAgIGAKH4sIAAAA AAACA+3KsQ3CQB" - "AEwCvlK4D/N4frMSGBkQz0jwmQiHCEo5lkpd09HOPv6mrMfGcbs37nR7R2Pg01" - "ew5r32rvNUrGDp73x7SUEpfrbZl//LZ2AAAAAAAAAAAA2NELx33R7wAoAAAK" -).encode("ascii") + b"ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxNzg5MDQ3OSAgMCAgICAgMCAgICAgMTAwNj" + b"Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTc4OTA0NzkgIDAgICA" + b"gIDAgICAgIDEw MDY0NCAgMjU4ICAgICAgIGAKH4sIAAAAAAACA+3Rz0rEMBAG8Jz7FPME" + b"3aT/FoqIC54EwZP3mB1s 1jQp0yz6+HaVBRFcTxWE7wfJQDKZHL5yo1anF9u2PVWzbfXXe" + b"qaM6Zq66pqurZQ2uqorRa36A8c5 WyFST4ck8ULfb/f/VLlxKWZJYeX8u6b5Mf+qbr7lb7" + b"rliDTyX92DdS/2mXsaffSjDcUjy+xT7MmU utiJG3xml4+ytNgQinvrY14WS093aYh0dVj" + b"2G36z4xS4dGm8Lm55duKn/DFmd55M0+dX9OrzQDHR nieOe47O80xJKOWBhYSDPb2cy0IB" + b"AAAAAAAAAAAAAAAAAAAAAMBF70s1/foAKAAAZGF0YS50YXIu Z3ogICAgIDEzMTc4OTA0N" + b"zkgIDAgICAgIDAgICAgIDEwMDY0NCAgMTA3ICAgICAgIGAKH4sIAAAA AAACA+3KsQ3CQB" + b"AEwCvlK4D/N4frMSGBkQz0jwmQiHCEo5lkpd09HOPv6mrMfGcbs37nR7R2Pg01" + 
b"ew5r32rvNUrGDp73x7SUEpfrbZl//LZ2AAAAAAAAAAAA2NELx33R7wAoAAAK" +) PKGDEB_SIMPLE_RELATIONS = ( - "ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxODUxNjMyMiAgMCAgICAgMCAgICAgMTAwNj" - "Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTg1MTYzMjIgIDAgICA" - "gIDAgICAgIDEw MDY0NCAgMzQ0ICAgICAgIGAKH4sIAAAAAAACA+3R3UrDMBQH8F7nKc4L" - "rGu2tYMi4tQrQRkI3mdp tNnSpKTZ0Lc37TYVQb2aIvx/0Ob09DQfPek4Obksmud5P/J5n" - "n0cjxLOi1mez6ecT5KMZ5NJkVCe /IJtF4QnSlZr5+03dT+9/6fSsXQ2eGdO3P9iNvuy/3" - "mWf+o/L6Y8oQz9P7mlkBvxpErqdNMaNfLK iKCd7diD8l0MSuJpxu6VDMNDozvJll47r8N" - "LSa7t08KwhZe1DrFq6+NkwphYpEbXqlW26kpqvaqG mLO33DFx5eyj0TLElDyEnF16JTYx" - "s+pHHidzO12pYaYh4uxWaBvipXxJN662dLaO9wv1LPqDpNI1 53GtTnrd7re+iJu3uhGG2" - "v2hKdQiUC26w+Hp/fAU3Tna7f8BCa+OC1ekbfzwQ3HKEgAAAAAAAAAA AAAAAAAAAACAv/" - "EKgcHt1gAoAABkYXRhLnRhci5neiAgICAgMTMxODUxNjMyMiAgMCAgICAgMCAg ICAgMTA" - "wNjQ0ICAxMDcgICAgICAgYAofiwgAAAAAAAID7cqxDcJQEETBK8UVwH2b+64HQgIjGegf " - "CJCIIMLRTPKC3d0+/i6f5qpX21z52bdorR+m7Fl9imw5jhVDxQbu19txHYY4nS/r8uX3aw" - "cAAAAA AAAAAIANPQALnD6FACgAAAo=" -).encode("ascii") + b"ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxODUxNjMyMiAgMCAgICAgMCAgICAgMTAwNj" + b"Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTg1MTYzMjIgIDAgICA" + b"gIDAgICAgIDEw MDY0NCAgMzQ0ICAgICAgIGAKH4sIAAAAAAACA+3R3UrDMBQH8F7nKc4L" + b"rGu2tYMi4tQrQRkI3mdp tNnSpKTZ0Lc37TYVQb2aIvx/0Ob09DQfPek4Obksmud5P/J5n" + b"n0cjxLOi1mez6ecT5KMZ5NJkVCe /IJtF4QnSlZr5+03dT+9/6fSsXQ2eGdO3P9iNvuy/3" + b"mWf+o/L6Y8oQz9P7mlkBvxpErqdNMaNfLK iKCd7diD8l0MSuJpxu6VDMNDozvJll47r8N" + b"LSa7t08KwhZe1DrFq6+NkwphYpEbXqlW26kpqvaqG mLO33DFx5eyj0TLElDyEnF16JTYx" + b"s+pHHidzO12pYaYh4uxWaBvipXxJN662dLaO9wv1LPqDpNI1 53GtTnrd7re+iJu3uhGG2" + b"v2hKdQiUC26w+Hp/fAU3Tna7f8BCa+OC1ekbfzwQ3HKEgAAAAAAAAAA AAAAAAAAAACAv/" + b"EKgcHt1gAoAABkYXRhLnRhci5neiAgICAgMTMxODUxNjMyMiAgMCAgICAgMCAg ICAgMTA" + b"wNjQ0ICAxMDcgICAgICAgYAofiwgAAAAAAAID7cqxDcJQEETBK8UVwH2b+64HQgIjGegf " + b"CJCIIMLRTPKC3d0+/i6f5qpX21z52bdorR+m7Fl9imw5jhVDxQbu19txHYY4nS/r8uX3aw" + b"cAAAAA AAAAAIANPQALnD6FACgAAAo=" +) PKGDEB_VERSION_RELATIONS = ( - "ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxODUxNjQ5OCAgMCAgICAgMCAgICAgMTAwNj" - "Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTg1MTY0OTggIDAgICA" - "gIDAgICAgIDEw MDY0NCAgMzUwICAgICAgIGAKH4sIAAAAAAACA+3RQUvDMBQH8Jz7KXLU" - "w7pmazcoczj1JAgDwXuW xTVbmpQkG/rtTds5RFBPGwj/H7R5fS9N07x0SM4ui6ZF0Y5sW" - "mRfx0+EsUleFNNxznKSsWw0HhNa kAvY+8AdpWS1tc78Mu+v+j+VDoU1wVl95v5P8vzH/h" - "eMfes/m7T9z9D/s1tyseMbWdKDdF5ZM3BS 8xADn7z0mZKyNEuepQjdQ628SJZOWafCe0l" - "t06a5ThZOVCrEWXsXV+Nax0ly8CAbada+pI2T6y5m 9Gp2Q0dpdp2ciqfKsXBvzatWIsSS" - "OIbta7O+euck38XSqh1jfj7v80tnD2otu491EUueuDIhXtKV 9NFWhs628X4r33jdaJkKW" - "8/jLrxwqun/bhH/z6iaa9r0B0NDxQOtuKeng2n31C6qzObz1HyaEAAA AAAAAAAAAAAAAA" - "AAAACAy/sAwTtOtwAoAABkYXRhLnRhci5neiAgICAgMTMxODUxNjQ5OCAgMCAg ICAgMCA" - "gICAgMTAwNjQ0ICAxMDcgICAgICAgYAofiwgAAAAAAAID7cqxEcIwEETRK0UVgCT7UD0Q " - "EpgZA/0DATNEEOHoveQHu7t9/F19GpmvtpH1s2/R2mGeemYfc9RW+9SjZGzgfr0d11LidL" - "6sy5ff rx0AAAAAAAAAAAA29AD/ixlwACgAAAo=" -).encode("ascii") + b"ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxODUxNjQ5OCAgMCAgICAgMCAgICAgMTAwNj" + b"Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTg1MTY0OTggIDAgICA" + b"gIDAgICAgIDEw MDY0NCAgMzUwICAgICAgIGAKH4sIAAAAAAACA+3RQUvDMBQH8Jz7KXLU" + b"w7pmazcoczj1JAgDwXuW xTVbmpQkG/rtTds5RFBPGwj/H7R5fS9N07x0SM4ui6ZF0Y5sW" + b"mRfx0+EsUleFNNxznKSsWw0HhNa kAvY+8AdpWS1tc78Mu+v+j+VDoU1wVl95v5P8vzH/h" + b"eMfes/m7T9z9D/s1tyseMbWdKDdF5ZM3BS 8xADn7z0mZKyNEuepQjdQ628SJZOWafCe0l" + b"t06a5ThZOVCrEWXsXV+Nax0ly8CAbada+pI2T6y5m 9Gp2Q0dpdp2ciqfKsXBvzatWIsSS" + 
b"OIbta7O+euck38XSqh1jfj7v80tnD2otu491EUueuDIhXtKV 9NFWhs628X4r33jdaJkKW" + b"8/jLrxwqun/bhH/z6iaa9r0B0NDxQOtuKeng2n31C6qzObz1HyaEAAA AAAAAAAAAAAAAA" + b"AAAACAy/sAwTtOtwAoAABkYXRhLnRhci5neiAgICAgMTMxODUxNjQ5OCAgMCAg ICAgMCA" + b"gICAgMTAwNjQ0ICAxMDcgICAgICAgYAofiwgAAAAAAAID7cqxEcIwEETRK0UVgCT7UD0Q " + b"EpgZA/0DATNEEOHoveQHu7t9/F19GpmvtpH1s2/R2mGeemYfc9RW+9SjZGzgfr0d11LidL" + b"6sy5ff rx0AAAAAAAAAAAA29AD/ixlwACgAAAo=" +) PKGDEB_MULTIPLE_RELATIONS = ( - "ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxODU4MDA3OSAgMCAgICAgMCAgICAgMTAwNj" - "Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTg1ODAwNzkgIDAgICA" - "gIDAgICAgIDEw MDY0NCAgMzgzICAgICAgIGAKH4sIAAAAAAACA+3RXUvDMBQG4F7nV5xL" - "BVeb2nZQ5vDrShAGgvcx izaaNiXNRMEfb9atcwq6qwnC+8CW05N3bXcSH0d7lwTjPF+uf" - "Jwn2+sg4rzI8nERYjxKeJJmaUR5 9AcWnReOKLp/sq75Jbdr/5+Kj6VtvLNmz+dfZNmP51" - "+cZN/OnxdhmxKc/97NhHwWj6qkemG8bo0a OWWE17bp2J1yXShK4nHCbpX0/UWtO8lmTlu" - "n/VtJtl22hWHnTlbah9TChdsJY0JIja5Uq5p5V1Lr 1LyvOR1MTimNk8Ojz2bKNsFNagit" - "Gif0vq4yOphOv+yl7NI2D0ZLH34v1+XyOZN1bOil7MIp8RxS 98uVb92pb6Thne2Lnqv+h" - "fuKHw1Vym6Ebnz4KFfSta0amjyF7zP1KuowuVjaehr+RyedblezOg/T anQtDLWrOZOvhK" - "dKdJt504swC9XRg3WkhKxomH/MIgAAAAAAAAAAAAAAAAAAAACAHT4AFDs6bAAo AAAKZGF" - "0YS50YXIuZ3ogICAgIDEzMTg1ODAwNzkgIDAgICAgIDAgICAgIDEwMDY0NCAgMTA3ICAg " - "ICAgIGAKH4sIAAAAAAACA+3KsRHCMBBE0StFFYBkfFY9EBKYGWP3DwTMEEGEo/eSH+wejv" - "F39aln vtp61s++RWvTeBpy6tmjtjqMLUrGDrb7el5Kicv1tsxffr92AAAAAAAAAAAA2NE" - "Db6L1AQAoAAAK" -).encode("ascii") + b"ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxODU4MDA3OSAgMCAgICAgMCAgICAgMTAwNj" + b"Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTg1ODAwNzkgIDAgICA" + b"gIDAgICAgIDEw MDY0NCAgMzgzICAgICAgIGAKH4sIAAAAAAACA+3RXUvDMBQG4F7nV5xL" + b"BVeb2nZQ5vDrShAGgvcx izaaNiXNRMEfb9atcwq6qwnC+8CW05N3bXcSH0d7lwTjPF+uf" + b"Jwn2+sg4rzI8nERYjxKeJJmaUR5 9AcWnReOKLp/sq75Jbdr/5+Kj6VtvLNmz+dfZNmP51" + b"+cZN/OnxdhmxKc/97NhHwWj6qkemG8bo0a OWWE17bp2J1yXShK4nHCbpX0/UWtO8lmTlu" + b"n/VtJtl22hWHnTlbah9TChdsJY0JIja5Uq5p5V1Lr 1LyvOR1MTimNk8Ojz2bKNsFNagit" + b"Gif0vq4yOphOv+yl7NI2D0ZLH34v1+XyOZN1bOil7MIp8RxS 98uVb92pb6Thne2Lnqv+h" + b"fuKHw1Vym6Ebnz4KFfSta0amjyF7zP1KuowuVjaehr+RyedblezOg/T anQtDLWrOZOvhK" + b"dKdJt504swC9XRg3WkhKxomH/MIgAAAAAAAAAAAAAAAAAAAACAHT4AFDs6bAAo AAAKZGF" + b"0YS50YXIuZ3ogICAgIDEzMTg1ODAwNzkgIDAgICAgIDAgICAgIDEwMDY0NCAgMTA3ICAg " + b"ICAgIGAKH4sIAAAAAAACA+3KsRHCMBBE0StFFYBkfFY9EBKYGWP3DwTMEEGEo/eSH+wejv" + b"F39aln vtp61s++RWvTeBpy6tmjtjqMLUrGDrb7el5Kicv1tsxffr92AAAAAAAAAAAA2NE" + b"Db6L1AQAoAAAK" +) PKGDEB_OR_RELATIONS = ( - "ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxNzg4ODg2OSAgMCAgICAgMCAgICAgMTAwNj" - "Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTc4ODg4NjkgIDAgICA" - "gIDAgICAgIDEw MDY0NCAgMzc1ICAgICAgIGAKH4sIAAAAAAACA+3R30vDMBAH8D73r7g3" - "FbS23brBUFHwSRAFwfeY HmtmlpQ08wf4x3vbWB2Cig8ThO8H2qbp3fXCZcfJzuViXFXLZ" - "zGu8u3nRlIUo+GgHBXVUPaLvCwH CVXJH1h0UQWi5GHmg/sm7qfv/1R2rL2Lwdsdz380HH" - "45f5n6p/kXo0GeUI7579yt0o9qyhPy4Siw VdF416X3HDpZTKjI8vSOdVy9zE2n09tgfDD" - "xVTLa5bay6UXQjYkStQhSSFkrQXx0yS27uptQG7he rQvaPzmlMssP6O1jt0z7yD6sj9qE" - "XCvjolwcJnTlG0cnM7mf84uat5Yz7ednUqbTwbTrXi+kW2fm ylK7PiHFRkVqVCcnpf6kW" - "UrixtlX2uqZlKupXwcm47Rd1FwfUidLJh8b3qqyqr2qpJWTfzyxtC55 bi/2qfTcsJPvVi" - "+WWW4qSdw3J301WZoAAAAAAAAAAAAAAAAAAAAAAPzOO2wqjioAKAAACmRhdGEu dGFyLmd" - "6ICAgICAxMzE3ODg4ODY5ICAwICAgICAwICAgICAxMDA2NDQgIDEwNyAgICAgICBgCh+L " - "CAAAAAAAAgPtyrsRwjAURNFXiioAfZBcjwkJzIyB/oGAGSIc4eic5Aa7h2P8XX6Zen+3TD" - "1/9yNK" - "GadWR2ltRC651hGpxw4et/u8phTny3Vdfvy2dgAAAAAAAAAAANjRE6Lr2rEAKAAACg==" -).encode("ascii") + 
b"ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxNzg4ODg2OSAgMCAgICAgMCAgICAgMTAwNj" + b"Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTc4ODg4NjkgIDAgICA" + b"gIDAgICAgIDEw MDY0NCAgMzc1ICAgICAgIGAKH4sIAAAAAAACA+3R30vDMBAH8D73r7g3" + b"FbS23brBUFHwSRAFwfeY HmtmlpQ08wf4x3vbWB2Cig8ThO8H2qbp3fXCZcfJzuViXFXLZ" + b"zGu8u3nRlIUo+GgHBXVUPaLvCwH CVXJH1h0UQWi5GHmg/sm7qfv/1R2rL2Lwdsdz380HH" + b"45f5n6p/kXo0GeUI7579yt0o9qyhPy4Siw VdF416X3HDpZTKjI8vSOdVy9zE2n09tgfDD" + b"xVTLa5bay6UXQjYkStQhSSFkrQXx0yS27uptQG7he rQvaPzmlMssP6O1jt0z7yD6sj9qE" + b"XCvjolwcJnTlG0cnM7mf84uat5Yz7ednUqbTwbTrXi+kW2fm ylK7PiHFRkVqVCcnpf6kW" + b"UrixtlX2uqZlKupXwcm47Rd1FwfUidLJh8b3qqyqr2qpJWTfzyxtC55 bi/2qfTcsJPvVi" + b"+WWW4qSdw3J301WZoAAAAAAAAAAAAAAAAAAAAAAPzOO2wqjioAKAAACmRhdGEu dGFyLmd" + b"6ICAgICAxMzE3ODg4ODY5ICAwICAgICAwICAgICAxMDA2NDQgIDEwNyAgICAgICBgCh+L " + b"CAAAAAAAAgPtyrsRwjAURNFXiioAfZBcjwkJzIyB/oGAGSIc4eic5Aa7h2P8XX6Zen+3TD" + b"1/9yNK" + b"GadWR2ltRC651hGpxw4et/u8phTny3Vdfvy2dgAAAAAAAAAAANjRE6Lr2rEAKAAACg==" +) +PKGDEB_BROKEN_DESCRIPTION = ( + b"ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTY3OTIzNzM2MyAgMCAgICAgMCAgICAgMTAwNj" + b"Q0ICA0ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuenN0IDE2NzkyMzczNjMgIDAgICAg" + b"IDAgICAgIDEwMDY0NCAgMjY4ICAgICAgIGAKKLUv/QBoHQgAwk4uInBJ21bxEmhb1HK7ih" + b"shSQan5wcgNCga0N2kn+wSVP33XwppIIeGBx1QBNf1M9H1XWE3fk4z9afsrX5H3l+GtZlz" + b"e4+y5nUOhExsDOqo26a91qhCFwIjoHFGy9YXGwUUALQ1b58CXb7ja2tgOLf/RO7YGH6FkZ" + b"DSBX4jGM9bdNozbPZ6wX+hSyfsAu9B0kzUoVTKxFLKOtZKnH98zgiU0f1T9TfCJxPRRJSi" + b"HCmRIGktlSkRREmJQAEcILBKRqwcAxUD8QEAkmWFeAkIIKSmWvDwQBNwfBxuFeZKr7/ACR" + b"VYL8ABU/3amMvgAU33hMJwAGz/tzlrw2MACcQUwHyabmRhdGEudGFyLnpzdCAgICAxNjc5" + b"MjM3MzYzICAwICAgICAwICAgICAxMDA2NDQgIDg0ICAgICAgICBgCii1L/0AaF0CADQDLi" + b"8AMDc1NTE3NTEAMTQ0MDU1NjU3NDcAMDEwNTcwACA1AAB1c3RhciAgAGJyMHRoM3IACSCQ" + b"mwfMBhyGBqrXn71BlmwNcH3CQw==" +) HASH1 = base64.decodebytes(b"/ezv4AefpJJ8DuYFSq4RiEHJYP4=") @@ -327,13 +377,20 @@ HASH3 = base64.decodebytes(b"NJM05mj86veaSInYxxqL1wahods=") HASH_MINIMAL = b"6\xce\x8f\x1bM\x82MWZ\x1a\xffjAc(\xdb(\xa1\x0eG" HASH_SIMPLE_RELATIONS = ( - b"'#\xab&k\xe6\xf5E\xcfB\x9b\xceO7\xe6\xec\xa9\xddY\xaa") + b"'#\xab&k\xe6\xf5E\xcfB\x9b\xceO7\xe6\xec\xa9\xddY\xaa" +) HASH_VERSION_RELATIONS = ( - b"\x84\xc9\xb4\xb3\r\x95\x16\x03\x95\x98\xc0\x14u\x06\xf7eA\xe65\xd1") + b"\x84\xc9\xb4\xb3\r\x95\x16\x03\x95\x98\xc0\x14u\x06\xf7eA\xe65\xd1" +) HASH_MULTIPLE_RELATIONS = ( - b"\xec\xcdi\xdc\xde-\r\xc3\xd3\xc9s\x84\xe4\xc3\xd6\xc4\x12T\xa6\x0e") + b"\xec\xcdi\xdc\xde-\r\xc3\xd3\xc9s\x84\xe4\xc3\xd6\xc4\x12T\xa6\x0e" +) HASH_OR_RELATIONS = ( - b"\xa1q\xf4*\x1c\xd4L\xa1\xca\xf1\xfa?\xc3\xc7\x9f\x88\xd53B\xc9") + b"\xa1q\xf4*\x1c\xd4L\xa1\xca\xf1\xfa?\xc3\xc7\x9f\x88\xd53B\xc9" +) +HASH_BROKEN_DESCRIPTION = ( + b"\x16\xbd\xb8+\xed\xc0\x07\x84f!\xe6d\xea\xe7\xaf\xc6\xe4\t\xad\xd7" +) def create_deb(target_dir, pkg_name, pkg_data): diff -Nru landscape-client-23.02/landscape/lib/apt/package/tests/test_facade.py landscape-client-23.08/landscape/lib/apt/package/tests/test_facade.py --- landscape-client-23.02/landscape/lib/apt/package/tests/test_facade.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/apt/package/tests/test_facade.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,33 +1,48 @@ -from collections import namedtuple import os import sys -import textwrap import tempfile +import textwrap import unittest import weakref +from collections import namedtuple +from unittest import mock import apt import apt_pkg +from apt.cache import LockFailedException from apt.package import Package from 
aptsources.sourceslist import SourcesList -from apt.cache import LockFailedException -import mock from twisted.python.compat import unicode -from landscape.lib.fs import read_text_file, create_text_file from landscape.lib import testing -from landscape.lib.apt.package.testing import ( - HASH1, HASH2, HASH3, PKGNAME1, PKGNAME2, PKGNAME3, - PKGDEB1, PKGNAME_MINIMAL, PKGDEB_MINIMAL, - create_deb, AptFacadeHelper, - create_simple_repository) -from landscape.lib.apt.package.facade import ( - TransactionError, DependencyError, ChannelError, AptFacade, - LandscapeInstallProgress) - - -_normalize_field = (lambda f: f.replace("-", "_").lower()) -_DEB_STANZA_FIELDS = [_normalize_field(f) for f in [ +from landscape.lib.apt.package.facade import AptFacade +from landscape.lib.apt.package.facade import ChannelError +from landscape.lib.apt.package.facade import DependencyError +from landscape.lib.apt.package.facade import LandscapeInstallProgress +from landscape.lib.apt.package.facade import TransactionError +from landscape.lib.apt.package.testing import AptFacadeHelper +from landscape.lib.apt.package.testing import create_deb +from landscape.lib.apt.package.testing import create_simple_repository +from landscape.lib.apt.package.testing import HASH1 +from landscape.lib.apt.package.testing import HASH2 +from landscape.lib.apt.package.testing import HASH3 +from landscape.lib.apt.package.testing import PKGDEB1 +from landscape.lib.apt.package.testing import PKGDEB_MINIMAL +from landscape.lib.apt.package.testing import PKGNAME1 +from landscape.lib.apt.package.testing import PKGNAME2 +from landscape.lib.apt.package.testing import PKGNAME3 +from landscape.lib.apt.package.testing import PKGNAME_MINIMAL +from landscape.lib.fs import create_text_file +from landscape.lib.fs import read_text_file + + +def _normalize_field(f): + return f.replace("-", "_").lower() + + +_DEB_STANZA_FIELDS = [ + _normalize_field(f) + for f in [ "Package", "Architecture", "Version", @@ -47,7 +62,8 @@ "SHA1", "SHA256", "Description", - ]] + ] +] _DebStanza = namedtuple("DebStanza", _DEB_STANZA_FIELDS) @@ -71,7 +87,7 @@ return _DebStanza(**data) -class FakeOwner(object): +class FakeOwner: """Fake Owner object that apt.progress.text.AcquireProgress expects.""" def __init__(self, filesize, error_text=""): @@ -83,7 +99,7 @@ self.error_text = error_text -class FakeFetchItem(object): +class FakeFetchItem: """Fake Item object that apt.progress.text.AcquireProgress expects.""" def __init__(self, owner, description): @@ -105,16 +121,19 @@ def update(self): self._update_called = True - return super(TestCache, self).update() + return super().update() -class AptFacadeTest(testing.HelperTestCase, testing.FSTestCase, - unittest.TestCase): +class AptFacadeTest( + testing.HelperTestCase, + testing.FSTestCase, + unittest.TestCase, +): helpers = [AptFacadeHelper, testing.EnvironSaverHelper] def setUp(self): - super(AptFacadeTest, self).setUp() + super().setUp() self.facade.max_dpkg_retries = 0 self.facade.dpkg_retry_sleep = 0 @@ -157,13 +176,28 @@ AptFacade(root=root) self.assertTrue(os.path.exists(os.path.join(root, "etc", "apt"))) self.assertTrue( - os.path.exists(os.path.join(root, "etc", "apt", "sources.list.d"))) - self.assertTrue(os.path.exists( - os.path.join(root, "var", "cache", "apt", "archives", "partial"))) - self.assertTrue(os.path.exists( - os.path.join(root, "var", "lib", "apt", "lists", "partial"))) + os.path.exists(os.path.join(root, "etc", "apt", "sources.list.d")), + ) + self.assertTrue( + os.path.exists( + os.path.join( + root, + "var", 
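# NOTE: `helpers = [AptFacadeHelper, testing.EnvironSaverHelper]` above
# relies on the HelperTestCase protocol from landscape.lib.testing: each
# helper class is instantiated and given set_up(test_case) around every test
# (and, presumably, a matching tear_down). A minimal sketch of that protocol,
# under those assumptions:
import unittest


class HelperTestCase(unittest.TestCase):
    helpers: list = []

    def setUp(self):
        super().setUp()
        self._helper_instances = []
        for helper_factory in self.helpers:
            helper = helper_factory()
            helper.set_up(self)  # helper decorates the test case in place
            self._helper_instances.append(helper)

    def tearDown(self):
        for helper in reversed(self._helper_instances):
            tear_down = getattr(helper, "tear_down", None)
            if tear_down is not None:
                tear_down(self)
        super().tearDown()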
+ "cache", + "apt", + "archives", + "partial", + ), + ), + ) + self.assertTrue( + os.path.exists( + os.path.join(root, "var", "lib", "apt", "lists", "partial"), + ), + ) self.assertTrue( - os.path.exists(os.path.join(root, "var", "lib", "dpkg", "status"))) + os.path.exists(os.path.join(root, "var", "lib", "dpkg", "status")), + ) def test_no_system_packages(self): """ @@ -183,8 +217,10 @@ self.facade.reload_channels() self.assertEqual( ["bar", "foo"], - sorted(version.package.name - for version in self.facade.get_packages())) + sorted( + version.package.name for version in self.facade.get_packages() + ), + ) def test_get_packages_multiple_version(self): """ @@ -195,12 +231,18 @@ self._add_system_package("foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="1.5") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() self.assertEqual( [("foo", "1.0"), ("foo", "1.5")], - sorted((version.package.name, version.version) - for version in self.facade.get_packages())) + sorted( + (version.package.name, version.version) + for version in self.facade.get_packages() + ), + ) def test_get_packages_multiple_architectures(self): """ @@ -216,9 +258,13 @@ self._add_system_package("foo", version="1.0", architecture="amd64") self._add_system_package("bar", version="1.1", architecture="i386") facade.reload_channels() - self.assertEqual([("foo", "1.0")], - [(version.package.name, version.version) - for version in facade.get_packages()]) + self.assertEqual( + [("foo", "1.0")], + [ + (version.package.name, version.version) + for version in facade.get_packages() + ], + ) def test_add_channel_apt_deb_without_components(self): """ @@ -229,12 +275,14 @@ """ self.facade.add_channel_apt_deb("http://example.com/ubuntu", "lucid") list_filename = ( - self.apt_root + - "/etc/apt/sources.list.d/_landscape-internal-facade.list") + self.apt_root + + "/etc/apt/sources.list.d/_landscape-internal-facade.list" + ) sources_contents = read_text_file(list_filename) self.assertEqual( "deb http://example.com/ubuntu lucid\n", - sources_contents) + sources_contents, + ) def test_add_channel_apt_deb_no_duplicate(self): """ @@ -245,12 +293,14 @@ self.facade.add_channel_apt_deb("http://example.com/ubuntu", "lucid") self.facade.add_channel_apt_deb("http://example.com/ubuntu", "lucid") list_filename = ( - self.apt_root + - "/etc/apt/sources.list.d/_landscape-internal-facade.list") + self.apt_root + + "/etc/apt/sources.list.d/_landscape-internal-facade.list" + ) sources_contents = read_text_file(list_filename) self.assertEqual( "deb http://example.com/ubuntu lucid\n", - sources_contents) + sources_contents, + ) def test_add_channel_apt_deb_with_components(self): """ @@ -260,46 +310,70 @@ If components are given, they are included after the dist. """ self.facade.add_channel_apt_deb( - "http://example.com/ubuntu", "lucid", ["main", "restricted"]) + "http://example.com/ubuntu", + "lucid", + ["main", "restricted"], + ) list_filename = ( - self.apt_root + - "/etc/apt/sources.list.d/_landscape-internal-facade.list") + self.apt_root + + "/etc/apt/sources.list.d/_landscape-internal-facade.list" + ) sources_contents = read_text_file(list_filename) self.assertEqual( "deb http://example.com/ubuntu lucid main restricted\n", - sources_contents) + sources_contents, + ) def test_add_channel_apt_deb_trusted(self): """add_channel_apt_deb sets trusted option if trusted and local.""" # Don't override trust on unsigned/signed remotes. 
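# NOTE: the expectations below encode the rendering rule for channel lines:
# a trusted flag becomes a "[ trusted=yes|no ]" option, but only for local
# file:// channels; remote URLs keep apt's own trust handling. A sketch of
# that rule:
def deb_line(baseurl, dist, components=(), trusted=None):
    options = ""
    if trusted is not None and baseurl.startswith("file:"):
        options = "[ trusted={} ] ".format("yes" if trusted else "no")
    return f"deb {options}{baseurl} {dist} {' '.join(components)}".rstrip()


assert (
    deb_line("file://opt/spam", "unsigned", ["main"], trusted=True)
    == "deb [ trusted=yes ] file://opt/spam unsigned main"
)
assert (
    deb_line("http://example.com/ubuntu", "signed", ["main"], trusted=False)
    == "deb http://example.com/ubuntu signed main"
)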
self.facade.add_channel_apt_deb( - "http://example.com/ubuntu", "unsigned", ["main"], trusted=True) - self.facade.add_channel_apt_deb( - "http://example.com/ubuntu", "signed", ["main"], trusted=False) + "http://example.com/ubuntu", + "unsigned", + ["main"], + trusted=True, + ) + self.facade.add_channel_apt_deb( + "http://example.com/ubuntu", + "signed", + ["main"], + trusted=False, + ) # We explicitly trust local self.facade.add_channel_apt_deb( - "file://opt/spam", "unsigned", ["main"], trusted=True) + "file://opt/spam", + "unsigned", + ["main"], + trusted=True, + ) # We explicitly distrust local (thus check gpg signatures) self.facade.add_channel_apt_deb( - "file://opt/spam", "signed", ["main"], trusted=False) + "file://opt/spam", + "signed", + ["main"], + trusted=False, + ) # apt defaults (which is to check signatures on >xenial) - self.facade.add_channel_apt_deb( - "file://opt/spam", "default", ["main"]) + self.facade.add_channel_apt_deb("file://opt/spam", "default", ["main"]) list_filename = ( - self.apt_root + - "/etc/apt/sources.list.d/_landscape-internal-facade.list") + self.apt_root + + "/etc/apt/sources.list.d/_landscape-internal-facade.list" + ) sources_contents = read_text_file(list_filename) self.assertEqual( - textwrap.dedent("""\ + textwrap.dedent( + """\ deb http://example.com/ubuntu unsigned main deb http://example.com/ubuntu signed main deb [ trusted=yes ] file://opt/spam unsigned main deb [ trusted=no ] file://opt/spam signed main deb file://opt/spam default main - """), - sources_contents) + """, + ), + sources_contents, + ) def test_add_channel_deb_dir_adds_deb_channel(self): """ @@ -310,11 +384,17 @@ create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) self.assertEqual(1, len(self.facade.get_channels())) - self.assertEqual([{"baseurl": "file://%s" % deb_dir, - "distribution": "./", - "components": "", - "type": "deb"}], - self.facade.get_channels()) + self.assertEqual( + [ + { + "baseurl": f"file://{deb_dir}", + "distribution": "./", + "components": "", + "type": "deb", + }, + ], + self.facade.get_channels(), + ) def test_clear_channels(self): """ @@ -327,7 +407,8 @@ self.facade.clear_channels() self.assertEqual([], self.facade.get_channels()) self.assertFalse( - os.path.exists(self.facade._get_internal_sources_list())) + os.path.exists(self.facade._get_internal_sources_list()), + ) def test_clear_channels_no_channels(self): """ @@ -344,19 +425,36 @@ """ sources_list_file = apt_pkg.config.find_file("Dir::Etc::sourcelist") sources_list_d_file = os.path.join( - apt_pkg.config.find_dir("Dir::Etc::sourceparts"), "example.list") + apt_pkg.config.find_dir("Dir::Etc::sourceparts"), + "example.list", + ) create_text_file( - sources_list_file, "deb http://example1.com/ubuntu lucid main") + sources_list_file, + "deb http://example1.com/ubuntu lucid main", + ) create_text_file( - sources_list_d_file, "deb http://example2.com/ubuntu lucid main") + sources_list_d_file, + "deb http://example2.com/ubuntu lucid main", + ) self.facade.clear_channels() self.assertEqual( - [{'baseurl': 'http://example1.com/ubuntu', - 'components': 'main', 'distribution': 'lucid', 'type': 'deb'}, - {'baseurl': 'http://example2.com/ubuntu', - 'components': 'main', 'distribution': 'lucid', 'type': 'deb'}], - self.facade.get_channels()) + [ + { + "baseurl": "http://example1.com/ubuntu", + "components": "main", + "distribution": "lucid", + "type": "deb", + }, + { + "baseurl": "http://example2.com/ubuntu", + "components": "main", + "distribution": "lucid", + "type": "deb", + }, + ], + 
self.facade.get_channels(), + ) def test_write_package_stanza(self): """ @@ -369,9 +467,11 @@ packages_file = os.path.join(deb_dir, "Packages") with open(packages_file, "wb") as packages: self.facade.write_package_stanza(deb_file, packages) - SHA256 = ( - "f899cba22b79780dbe9bbbb802ff901b7e432425c264dc72e6bb20c0061e4f26") - expected = textwrap.dedent("""\ + sha256 = ( + "f899cba22b79780dbe9bbbb802ff901b7e432425c264dc72e6bb20c0061e4f26" + ) + expected = textwrap.dedent( + f"""\ Package: name1 Architecture: all Version: version1-release1 @@ -385,14 +485,15 @@ Recommends: recommendsname1 (= recommendsversion1) Suggests: suggestsname1 (= suggestsversion1) Conflicts: conflictsname1 (= conflictsversion1) - Filename: %(filename)s + Filename: {PKGNAME1} Size: 1038 MD5sum: efe83eb2b891046b303aaf9281c14e6e SHA1: b4ebcd2b0493008852a4954edc30a236d516c638 - SHA256: %(sha256)s + SHA256: {sha256} Description: Summary1 Description1 - """ % {"filename": PKGNAME1, "sha256": SHA256}) + """, + ) expected = _parse_deb_stanza(expected) stanza = _parse_deb_stanza(open(packages_file).read()) self.assertEqual(expected, stanza) @@ -410,7 +511,9 @@ for pkg_name in [PKGNAME1, PKGNAME2, PKGNAME3]: with open(self.makeFile(), "wb+", 0) as tmp: self.facade.write_package_stanza( - os.path.join(deb_dir, pkg_name), tmp) + os.path.join(deb_dir, pkg_name), + tmp, + ) tmp.seek(0) stanzas.append(tmp.read().decode("utf-8")) expected_contents = "\n".join(stanzas) @@ -427,8 +530,10 @@ self.facade.reload_channels() self.assertEqual( ["name1", "name2", "name3"], - sorted(version.package.name - for version in self.facade.get_packages())) + sorted( + version.package.name for version in self.facade.get_packages() + ), + ) def test_get_channels_with_no_channels(self): """ @@ -443,40 +548,67 @@ information about the channels. """ self.facade.add_channel_apt_deb( - "http://example.com/ubuntu", "lucid", ["main", "restricted"]) - self.assertEqual([{"baseurl": "http://example.com/ubuntu", - "distribution": "lucid", - "components": "main restricted", - "type": "deb"}], - self.facade.get_channels()) + "http://example.com/ubuntu", + "lucid", + ["main", "restricted"], + ) + self.assertEqual( + [ + { + "baseurl": "http://example.com/ubuntu", + "distribution": "lucid", + "components": "main restricted", + "type": "deb", + }, + ], + self.facade.get_channels(), + ) def test_get_channels_with_disabled_channels(self): """ C{get_channels()} doesn't return disabled deb URLs. """ self.facade.add_channel_apt_deb( - "http://enabled.example.com/ubuntu", "lucid", ["main"]) - self.facade.add_channel_apt_deb( - "http://disabled.example.com/ubuntu", "lucid", ["main"]) + "http://enabled.example.com/ubuntu", + "lucid", + ["main"], + ) + self.facade.add_channel_apt_deb( + "http://disabled.example.com/ubuntu", + "lucid", + ["main"], + ) sources_list = SourcesList() for entry in sources_list: if "disabled" in entry.uri: entry.set_enabled(False) sources_list.save() - self.assertEqual([{"baseurl": "http://enabled.example.com/ubuntu", - "distribution": "lucid", - "components": "main", - "type": "deb"}], - self.facade.get_channels()) + self.assertEqual( + [ + { + "baseurl": "http://enabled.example.com/ubuntu", + "distribution": "lucid", + "components": "main", + "type": "deb", + }, + ], + self.facade.get_channels(), + ) def test_reset_channels(self): """ C{reset_channels()} disables all the configured deb URLs. 
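# NOTE: the expected stanza above is a triple-quoted f-string passed through
# textwrap.dedent. The f-string substitutions (PKGNAME1, sha256) happen
# first and dedent then strips the common indentation, so interpolated
# values must not contain newlines that would defeat the dedent. Minimal
# demonstration:
import textwrap

name = "name1"
expected = textwrap.dedent(
    f"""\
    Package: {name}
    Architecture: all
    """,
)
assert expected == "Package: name1\nArchitecture: all\n"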
""" self.facade.add_channel_apt_deb( - "http://1.example.com/ubuntu", "lucid", ["main", "restricted"]) - self.facade.add_channel_apt_deb( - "http://2.example.com/ubuntu", "lucid", ["main", "restricted"]) + "http://1.example.com/ubuntu", + "lucid", + ["main", "restricted"], + ) + self.facade.add_channel_apt_deb( + "http://2.example.com/ubuntu", + "lucid", + ["main", "restricted"], + ) self.facade.reset_channels() self.assertEqual([], self.facade.get_channels()) @@ -489,12 +621,17 @@ self._add_package_to_deb_dir(deb_dir, "foo") self._add_package_to_deb_dir(deb_dir, "bar") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() self.assertEqual( ["bar", "foo"], - sorted(version.package.name - for version in self.facade.get_packages())) + sorted( + version.package.name for version in self.facade.get_packages() + ), + ) def test_reload_channels_refetch_package_index(self): """ @@ -505,7 +642,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() new_facade = AptFacade(root=self.apt_root) self._add_package_to_deb_dir(deb_dir, "bar") @@ -514,8 +654,10 @@ new_facade.reload_channels() self.assertEqual( ["bar", "foo"], - sorted(version.package.name - for version in new_facade.get_packages())) + sorted( + version.package.name for version in new_facade.get_packages() + ), + ) def test_reload_channels_not_refetch_package_index(self): """ @@ -526,7 +668,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() new_facade = AptFacade(root=self.apt_root) self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") @@ -545,7 +690,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() self._add_package_to_deb_dir(deb_dir, "bar") self._touch_packages_file(deb_dir) @@ -600,7 +748,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.refetch_package_index = True self.facade._cache.update = new_apt_update self.facade.reload_channels(force_reload_binaries=True) @@ -622,12 +773,17 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.refetch_package_index = False self.facade._cache.update = new_apt_update self.facade.reload_channels(force_reload_binaries=True) self.assertEqual( - [self.facade._get_internal_sources_list()], passed_in_lists) + [self.facade._get_internal_sources_list()], + passed_in_lists, + ) def test_reload_channels_force_reload_binaries_old_apt(self): """ @@ -644,7 +800,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.refetch_package_index = False self.facade._cache.update = 
old_apt_update self.facade.reload_channels(force_reload_binaries=True) @@ -667,14 +826,18 @@ self.facade.ensure_channels_reloaded() self.assertEqual( ["foo"], - sorted(version.package.name - for version in self.facade.get_packages())) + sorted( + version.package.name for version in self.facade.get_packages() + ), + ) self._add_system_package("bar") self.facade.ensure_channels_reloaded() self.assertEqual( ["foo"], - sorted(version.package.name - for version in self.facade.get_packages())) + sorted( + version.package.name for version in self.facade.get_packages() + ), + ) def test_ensure_channels_reloaded_reload_channels(self): """ @@ -685,14 +848,18 @@ self.facade.reload_channels() self.assertEqual( ["foo"], - sorted(version.package.name - for version in self.facade.get_packages())) + sorted( + version.package.name for version in self.facade.get_packages() + ), + ) self._add_system_package("bar") self.facade.ensure_channels_reloaded() self.assertEqual( ["foo"], - sorted(version.package.name - for version in self.facade.get_packages())) + sorted( + version.package.name for version in self.facade.get_packages() + ), + ) def test_reload_channels_with_channel_error(self): """ @@ -734,14 +901,18 @@ self.facade.reload_channels() self.assertEqual( ["i386-package"], - sorted(version.package.name - for version in self.facade.get_packages())) + sorted( + version.package.name for version in self.facade.get_packages() + ), + ) self.facade.set_arch("amd64") self.facade.reload_channels() self.assertEqual( ["amd64-package"], - sorted(version.package.name - for version in self.facade.get_packages())) + sorted( + version.package.name for version in self.facade.get_packages() + ), + ) def test_get_package_skeleton(self): """ @@ -842,9 +1013,14 @@ # be the same objects if references were held, as apt-cache tries # not to re-create objects anymore. self.assertEqual( - sorted([version.package.name - for version in self.facade.get_packages()]), - ["name2", "name3"]) + sorted( + [ + version.package.name + for version in self.facade.get_packages() + ], + ), + ["name2", "name3"], + ) # Those should have been collected. self.assertIsNone(pkg2()) self.assertIsNone(pkg3()) @@ -861,9 +1037,11 @@ # object. 
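# NOTE: new_apt_update/old_apt_update in the force_reload_binaries tests
# above are defined outside these hunks. From their use they replace
# facade._cache.update and record which sources.list apt was pointed at; a
# sketch of such stubs, assuming the newer python-apt signature in which
# Cache.update() accepts a sources_list argument (the "_old_apt" variant
# models python-apt releases without it):
passed_in_lists = []


def new_apt_update(sources_list=None):
    passed_in_lists.append(sources_list)


def old_apt_update():
    # no sources_list parameter, like older python-apt releases
    passed_in_lists.append(None)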
new_pkgs = [version.package for version in self.facade.get_packages()] self.assertTrue( - self.facade.get_package_by_hash(HASH2).package in new_pkgs) + self.facade.get_package_by_hash(HASH2).package in new_pkgs, + ) self.assertTrue( - self.facade.get_package_by_hash(HASH3).package in new_pkgs) + self.facade.get_package_by_hash(HASH3).package in new_pkgs, + ) def test_is_package_installed_in_channel_not_installed(self): """ @@ -899,13 +1077,20 @@ create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) self._add_package_to_deb_dir( - deb_dir, "name1", version="version0-release0") - self._add_package_to_deb_dir( - deb_dir, "name1", version="version2-release2") + deb_dir, + "name1", + version="version0-release0", + ) + self._add_package_to_deb_dir( + deb_dir, + "name1", + version="version2-release2", + ) self._install_deb_file(os.path.join(deb_dir, PKGNAME1)) self.facade.reload_channels() [version0, version1, version2] = sorted( - self.facade.get_packages_by_name("name1")) + self.facade.get_packages_by_name("name1"), + ) self.assertEqual("version0-release0", version0.version) self.assertFalse(self.facade.is_package_installed(version0)) self.assertEqual("version1-release1", version1.version) @@ -922,12 +1107,12 @@ self._add_system_package("newdep") self._add_system_package("foo", control_fields={"Depends": "newdep"}) self.facade.reload_channels() - dep, = sorted(self.facade.get_packages_by_name("dep")) + (dep,) = sorted(self.facade.get_packages_by_name("dep")) dep.package.mark_auto(True) # dep should not be explicitely installed dep.package.mark_install(False) - newdep, = sorted(self.facade.get_packages_by_name("newdep")) - newdep, = sorted(self.facade.get_packages_by_name("newdep")) + (newdep,) = sorted(self.facade.get_packages_by_name("newdep")) + (newdep,) = sorted(self.facade.get_packages_by_name("newdep")) newdep.package.mark_auto(True) self.assertTrue(dep.package.is_installed) self.assertTrue(dep.package.is_auto_installed) @@ -982,7 +1167,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [package] = self.facade.get_packages() self.assertFalse(self.facade.is_package_upgrade(package)) @@ -996,7 +1184,10 @@ self._add_system_package("foo", version="0.5") self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [version_05, version_10] = sorted(self.facade.get_packages()) self.assertTrue(self.facade.is_package_upgrade(version_10)) @@ -1011,7 +1202,10 @@ self._add_system_package("foo", version="1.5") self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [version_10, version_15] = sorted(self.facade.get_packages()) self.assertFalse(self.facade.is_package_upgrade(version_10)) @@ -1027,7 +1221,10 @@ self._add_system_package("foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [package] = self.facade.get_packages() self.assertFalse(self.facade.is_package_upgrade(package)) @@ 
-1051,13 +1248,17 @@ # Create an APT preferences file that assigns a very low priority # to all local packages. self.makeFile( - content="Package: *\nPin: origin \"\"\nPin-priority: 10\n", - path=os.path.join(self.apt_root, "etc", "apt", "preferences")) + content='Package: *\nPin: origin ""\nPin-priority: 10\n', + path=os.path.join(self.apt_root, "etc", "apt", "preferences"), + ) self._add_system_package("foo", version="0.5") deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [version_05, version_10] = sorted(self.facade.get_packages()) self.assertFalse(self.facade.is_package_upgrade(version_10)) @@ -1080,12 +1281,20 @@ self._add_system_package("foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="1.5") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() self.assertEqual( [("foo", "1.0"), ("foo", "1.5")], - sorted([(version.package.name, version.version) - for version in self.facade.get_packages_by_name("foo")])) + sorted( + [ + (version.package.name, version.version) + for version in self.facade.get_packages_by_name("foo") + ], + ), + ) def test_perform_changes_with_nothing_to_do(self): """ @@ -1096,8 +1305,10 @@ self.assertEqual("none", os.environ["APT_LISTCHANGES_FRONTEND"]) self.assertEqual("none", os.environ["APT_LISTBUGS_FRONTEND"]) self.assertEqual("noninteractive", os.environ["DEBIAN_FRONTEND"]) - self.assertEqual(["--force-confold"], - apt_pkg.config.value_list("DPkg::options")) + self.assertEqual( + ["--force-confold"], + apt_pkg.config.value_list("DPkg::options"), + ) def test_perform_changes_with_path(self): """ @@ -1115,12 +1326,17 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) fetch_item = FakeFetchItem( - FakeOwner(1234, error_text="Some error"), "foo package") + FakeOwner(1234, error_text="Some error"), + "foo package", + ) def output_progress(fetch_progress, install_progress): fetch_progress.start() @@ -1133,13 +1349,13 @@ output = [ line.rstrip() for line in self.facade.perform_changes().splitlines() - if line.strip()] + if line.strip() + ] # Don't do a plain comparision of the output, since the output # in Lucid is slightly different. 
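# NOTE: the perform_changes() tests in this area all reduce the captured
# output to non-blank, right-stripped lines before asserting, so progress
# noise and trailing-whitespace differences between apt versions don't break
# the comparison. The idiom, isolated:
def significant_lines(text: str) -> list:
    return [line.rstrip() for line in text.splitlines() if line.strip()]


assert significant_lines("Stdout output\n\n   \nStderr output  \n") == [
    "Stdout output",
    "Stderr output",
]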
self.assertEqual(4, len(output)) self.assertTrue(output[0].startswith("Get:1 foo package")) - self.assertEqual( - ["Err foo package", " Some error"], output[1:3]) + self.assertEqual(["Err foo package", " Some error"], output[1:3]) self.assertTrue(output[3].startswith("Fetched ")) def test_perform_changes_dpkg_output(self): @@ -1149,7 +1365,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) @@ -1163,9 +1382,12 @@ output = [ line.rstrip() for line in self.facade.perform_changes().splitlines() - if line.strip()] + if line.strip() + ] self.assertEqual( - ["Stdout output", "Stderr output", "Stdout output again"], output) + ["Stdout output", "Stderr output", "Stdout output again"], + output, + ) def test_perform_changes_dpkg_output_error(self): """ @@ -1175,7 +1397,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) @@ -1191,11 +1416,19 @@ self.facade.perform_changes() output = [ line.rstrip() - for line in cm.exception.args[0].splitlines() if line.strip()] - self.assertEqual( - ["Oops", "Package operation log:", "Stdout output", - "Stderr output", "Stdout output again"], - output) + for line in cm.exception.args[0].splitlines() + if line.strip() + ] + self.assertEqual( + [ + "Oops", + "Package operation log:", + "Stdout output", + "Stderr output", + "Stdout output again", + ], + output, + ) def test_retry_changes_lock_failed(self): """ @@ -1205,7 +1438,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) @@ -1223,7 +1459,8 @@ output = [ line.rstrip() for line in self.facade.perform_changes().splitlines() - if line.strip()] + if line.strip() + ] self.assertEqual(["bad stuff!", "good stuff!"], output) def test_retry_changes_system_error(self): @@ -1235,7 +1472,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) @@ -1298,9 +1538,14 @@ progress = LandscapeInstallProgress() progress.old_excepthook = ( lambda exc_type, exc_value, exc_tb: hook_calls.append( - (exc_type, exc_value, exc_tb))) + (exc_type, exc_value, exc_tb), + ) + ) progress._prevent_dpkg_apport_error( - SystemError, SystemError("error"), object()) + SystemError, + SystemError("error"), + object(), + ) self.assertEqual([], hook_calls) def test_prevent_dpkg_apport_error_system_error_calls_system_hook(self): @@ -1328,7 +1573,9 @@ progress = LandscapeInstallProgress() progress.old_excepthook = ( lambda exc_type, exc_value, exc_tb: hook_calls.append( - (exc_type, exc_value, exc_tb))) + (exc_type, exc_value, exc_tb), + ) + ) error = object() traceback = object() progress._prevent_dpkg_apport_error(Exception, error, 
traceback) @@ -1342,7 +1589,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() foo = self.facade.get_packages_by_name("foo")[0] self.facade.mark_install(foo) @@ -1356,11 +1606,17 @@ self.facade.perform_changes() output = [ line.rstrip() - for line in cm.exception.args[0].splitlines()if line.strip()] - self.assertEqual( - ["dpkg didn't exit cleanly.", "Package operation log:", - "Stdout output"], - output) + for line in cm.exception.args[0].splitlines() + if line.strip() + ] + self.assertEqual( + [ + "dpkg didn't exit cleanly.", + "Package operation log:", + "Stdout output", + ], + output, + ) def test_perform_changes_install_broken_includes_error_info(self): """ @@ -1370,15 +1626,26 @@ """ deb_dir = self.makeDir() self._add_package_to_deb_dir( - deb_dir, "foo", - control_fields={"Depends": "missing | lost (>= 1.0)", - "Pre-Depends": "pre-missing | pre-lost"}) - self._add_package_to_deb_dir( - deb_dir, "bar", - control_fields={"Depends": "also-missing | also-lost (>= 1.0)", - "Pre-Depends": "also-pre-missing | also-pre-lost"}) - self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + deb_dir, + "foo", + control_fields={ + "Depends": "missing | lost (>= 1.0)", + "Pre-Depends": "pre-missing | pre-lost", + }, + ) + self._add_package_to_deb_dir( + deb_dir, + "bar", + control_fields={ + "Depends": "also-missing | also-lost (>= 1.0)", + "Pre-Depends": "also-pre-missing | also-pre-lost", + }, + ) + self.facade.add_channel_apt_deb( + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") @@ -1388,16 +1655,19 @@ with self.assertRaises(TransactionError) as cm: self.facade.perform_changes() self.assertEqual( - ["The following packages have unmet dependencies:", - " bar: PreDepends: also-pre-missing but is not installable or", - " also-pre-lost but is not installable", - " bar: Depends: also-missing but is not installable or", - " also-lost (>= 1.0) but is not installable", - " foo: PreDepends: pre-missing but is not installable or", - " pre-lost but is not installable", - " foo: Depends: missing but is not installable or", - " lost (>= 1.0) but is not installable"], - cm.exception.args[0].splitlines()[-9:]) + [ + "The following packages have unmet dependencies:", + " bar: PreDepends: also-pre-missing but is not installable or", # noqa: E501 + " also-pre-lost but is not installable", + " bar: Depends: also-missing but is not installable or", + " also-lost (>= 1.0) but is not installable", + " foo: PreDepends: pre-missing but is not installable or", + " pre-lost but is not installable", + " foo: Depends: missing but is not installable or", + " lost (>= 1.0) but is not installable", + ], + cm.exception.args[0].splitlines()[-9:], + ) def test_get_broken_packages_already_installed(self): """ @@ -1418,9 +1688,15 @@ """ deb_dir = self.makeDir() self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Depends": "bar"}) - self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + deb_dir, + "foo", + control_fields={"Depends": "bar"}, + ) + self.facade.add_channel_apt_deb( + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() self.assertEqual(set(), self.facade._get_broken_packages()) self.assertEqual("", 
self.facade._get_unmet_dependency_info()) @@ -1433,20 +1709,31 @@ """ deb_dir = self.makeDir() self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Depends": "bar"}) - self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + deb_dir, + "foo", + control_fields={"Depends": "bar"}, + ) + self.facade.add_channel_apt_deb( + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) with self.assertRaises(TransactionError): self.facade._preprocess_package_changes() self.assertEqual( - set([foo.package]), self.facade._get_broken_packages()) - self.assertEqual( - ["The following packages have unmet dependencies:", - " foo: Depends: bar but is not installable"], - self.facade._get_unmet_dependency_info().splitlines()) + {foo.package}, + self.facade._get_broken_packages(), + ) + self.assertEqual( + [ + "The following packages have unmet dependencies:", + " foo: Depends: bar but is not installable", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def test_get_unmet_dependency_info_predepend(self): """ @@ -1456,20 +1743,31 @@ """ deb_dir = self.makeDir() self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Pre-Depends": "bar"}) - self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + deb_dir, + "foo", + control_fields={"Pre-Depends": "bar"}, + ) + self.facade.add_channel_apt_deb( + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) with self.assertRaises(TransactionError): self.facade._preprocess_package_changes() self.assertEqual( - set([foo.package]), self.facade._get_broken_packages()) - self.assertEqual( - ["The following packages have unmet dependencies:", - " foo: PreDepends: bar but is not installable"], - self.facade._get_unmet_dependency_info().splitlines()) + {foo.package}, + self.facade._get_broken_packages(), + ) + self.assertEqual( + [ + "The following packages have unmet dependencies:", + " foo: PreDepends: bar but is not installable", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def test_get_unmet_dependency_info_with_version(self): """ @@ -1479,20 +1777,31 @@ """ deb_dir = self.makeDir() self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Depends": "bar (>= 1.0)"}) - self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + deb_dir, + "foo", + control_fields={"Depends": "bar (>= 1.0)"}, + ) + self.facade.add_channel_apt_deb( + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) with self.assertRaises(TransactionError): self.facade._preprocess_package_changes() self.assertEqual( - set([foo.package]), self.facade._get_broken_packages()) - self.assertEqual( - ["The following packages have unmet dependencies:", - " foo: Depends: bar (>= 1.0) but is not installable"], - self.facade._get_unmet_dependency_info().splitlines()) + {foo.package}, + self.facade._get_broken_packages(), + ) + self.assertEqual( + [ + "The following packages have unmet dependencies:", + " foo: Depends: bar (>= 1.0) but is not installable", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def test_get_unmet_dependency_info_with_dep_install(self): """ @@ -1504,9 +1813,15 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "bar", version="0.5") 
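# Illustrative aside (not part of the diff): the "bar (>= 1.0) but
# 0.5 is to be installed" message asserted below reduces to Debian
# dependency-operator checks on version strings, which python-apt
# (assumed installed) exposes as apt_pkg.check_dep():
import apt_pkg

apt_pkg.init()
assert not apt_pkg.check_dep("0.5", ">=", "1.0")  # 0.5 fails "bar (>= 1.0)"
assert apt_pkg.check_dep("2.0", ">=", "1.0")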
self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Depends": "bar (>= 1.0)"}) - self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + deb_dir, + "foo", + control_fields={"Depends": "bar (>= 1.0)"}, + ) + self.facade.add_channel_apt_deb( + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") @@ -1515,11 +1830,16 @@ with self.assertRaises(TransactionError): self.facade._preprocess_package_changes() self.assertEqual( - set([foo.package]), self.facade._get_broken_packages()) - self.assertEqual( - ["The following packages have unmet dependencies:", - " foo: Depends: bar (>= 1.0) but 0.5 is to be installed"], - self.facade._get_unmet_dependency_info().splitlines()) + {foo.package}, + self.facade._get_broken_packages(), + ) + self.assertEqual( + [ + "The following packages have unmet dependencies:", + " foo: Depends: bar (>= 1.0) but 0.5 is to be installed", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def test_get_unmet_dependency_info_with_dep_already_installed(self): """ @@ -1531,10 +1851,16 @@ deb_dir = self.makeDir() self._add_system_package("bar", version="1.0") self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Depends": "bar (>= 3.0)"}) + deb_dir, + "foo", + control_fields={"Depends": "bar (>= 3.0)"}, + ) self._add_package_to_deb_dir(deb_dir, "bar", version="2.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar1, bar2] = sorted(self.facade.get_packages_by_name("bar")) @@ -1543,11 +1869,16 @@ with self.assertRaises(TransactionError): self.facade._preprocess_package_changes() self.assertEqual( - set([foo.package]), self.facade._get_broken_packages()) - self.assertEqual( - ["The following packages have unmet dependencies:", - " foo: Depends: bar (>= 3.0) but 1.0 is to be installed"], - self.facade._get_unmet_dependency_info().splitlines()) + {foo.package}, + self.facade._get_broken_packages(), + ) + self.assertEqual( + [ + "The following packages have unmet dependencies:", + " foo: Depends: bar (>= 3.0) but 1.0 is to be installed", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def test_get_unmet_dependency_info_with_dep_upgraded(self): """ @@ -1559,10 +1890,16 @@ deb_dir = self.makeDir() self._add_system_package("bar", version="1.0") self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Depends": "bar (>= 3.0)"}) + deb_dir, + "foo", + control_fields={"Depends": "bar (>= 3.0)"}, + ) self._add_package_to_deb_dir(deb_dir, "bar", version="2.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar1, bar2] = sorted(self.facade.get_packages_by_name("bar")) @@ -1573,11 +1910,16 @@ with self.assertRaises(TransactionError): self.facade._preprocess_package_changes() self.assertEqual( - set([foo.package]), self.facade._get_broken_packages()) - self.assertEqual( - ["The following packages have unmet dependencies:", - " foo: Depends: bar (>= 3.0) but 2.0 is to be installed"], - self.facade._get_unmet_dependency_info().splitlines()) + {foo.package}, + self.facade._get_broken_packages(), + ) + self.assertEqual( + [ + "The following packages have unmet dependencies:", 
+ " foo: Depends: bar (>= 3.0) but 2.0 is to be installed", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def test_get_unmet_dependency_info_with_dep_downgraded(self): """ @@ -1589,10 +1931,16 @@ deb_dir = self.makeDir() self._add_system_package("bar", version="2.0") self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Depends": "bar (>= 3.0)"}) + deb_dir, + "foo", + control_fields={"Depends": "bar (>= 3.0)"}, + ) self._add_package_to_deb_dir(deb_dir, "bar", version="1.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar1, bar2] = sorted(self.facade.get_packages_by_name("bar")) @@ -1603,11 +1951,16 @@ with self.assertRaises(TransactionError): self.facade._preprocess_package_changes() self.assertEqual( - set([foo.package]), self.facade._get_broken_packages()) - self.assertEqual( - ["The following packages have unmet dependencies:", - " foo: Depends: bar (>= 3.0) but 1.0 is to be installed"], - self.facade._get_unmet_dependency_info().splitlines()) + {foo.package}, + self.facade._get_broken_packages(), + ) + self.assertEqual( + [ + "The following packages have unmet dependencies:", + " foo: Depends: bar (>= 3.0) but 1.0 is to be installed", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def test_get_unmet_dependency_info_with_or_deps(self): """ @@ -1617,21 +1970,32 @@ """ deb_dir = self.makeDir() self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Depends": "bar | baz (>= 1.0)"}) - self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + deb_dir, + "foo", + control_fields={"Depends": "bar | baz (>= 1.0)"}, + ) + self.facade.add_channel_apt_deb( + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) with self.assertRaises(TransactionError): self.facade._preprocess_package_changes() self.assertEqual( - set([foo.package]), self.facade._get_broken_packages()) - self.assertEqual( - ["The following packages have unmet dependencies:", - " foo: Depends: bar but is not installable or", - " baz (>= 1.0) but is not installable"], - self.facade._get_unmet_dependency_info().splitlines()) + {foo.package}, + self.facade._get_broken_packages(), + ) + self.assertEqual( + [ + "The following packages have unmet dependencies:", + " foo: Depends: bar but is not installable or", + " baz (>= 1.0) but is not installable", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def test_get_unmet_dependency_info_with_conflicts(self): """ @@ -1642,9 +2006,15 @@ deb_dir = self.makeDir() self._add_system_package("foo") self._add_package_to_deb_dir( - deb_dir, "bar", control_fields={"Conflicts": "foo"}) - self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + deb_dir, + "bar", + control_fields={"Conflicts": "foo"}, + ) + self.facade.add_channel_apt_deb( + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") @@ -1654,11 +2024,16 @@ self.facade._preprocess_package_changes() foo.package.mark_keep() self.assertEqual( - set([bar.package]), self.facade._get_broken_packages()) - self.assertEqual( - ["The following packages have unmet dependencies:", - " bar: Conflicts: foo but 1.0 is to be installed"], - 
self.facade._get_unmet_dependency_info().splitlines()) + {bar.package}, + self.facade._get_broken_packages(), + ) + self.assertEqual( + [ + "The following packages have unmet dependencies:", + " bar: Conflicts: foo but 1.0 is to be installed", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def test_get_unmet_dependency_info_with_breaks(self): """ @@ -1668,11 +2043,20 @@ """ deb_dir = self.makeDir() self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Depends": "bar"}) - self._add_package_to_deb_dir( - deb_dir, "bar", control_fields={"Breaks": "foo"}) - self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + deb_dir, + "foo", + control_fields={"Depends": "bar"}, + ) + self._add_package_to_deb_dir( + deb_dir, + "bar", + control_fields={"Breaks": "foo"}, + ) + self.facade.add_channel_apt_deb( + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") @@ -1680,11 +2064,16 @@ with self.assertRaises(TransactionError): self.facade._preprocess_package_changes() self.assertEqual( - set([foo.package]), self.facade._get_broken_packages()) - self.assertEqual( - ["The following packages have unmet dependencies:", - " foo: Depends: bar but is not installable"], - self.facade._get_unmet_dependency_info().splitlines()) + {foo.package}, + self.facade._get_broken_packages(), + ) + self.assertEqual( + [ + "The following packages have unmet dependencies:", + " foo: Depends: bar but is not installable", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def test_get_unmet_dependency_info_with_conflicts_not_installed(self): """ @@ -1695,11 +2084,16 @@ deb_dir = self.makeDir() self._add_system_package("foo") self._add_package_to_deb_dir( - deb_dir, "bar", - control_fields={"Conflicts": "foo, baz", "Breaks": "foo, baz"}) + deb_dir, + "bar", + control_fields={"Conflicts": "foo, baz", "Breaks": "foo, baz"}, + ) self._add_package_to_deb_dir(deb_dir, "baz") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") @@ -1709,12 +2103,17 @@ # removed by the resolver. 
foo.package.mark_keep() self.assertEqual( - set([bar.package]), self.facade._get_broken_packages()) - self.assertEqual( - ["The following packages have unmet dependencies:", - " bar: Conflicts: foo but 1.0 is to be installed", - " bar: Breaks: foo but 1.0 is to be installed"], - self.facade._get_unmet_dependency_info().splitlines()) + {bar.package}, + self.facade._get_broken_packages(), + ) + self.assertEqual( + [ + "The following packages have unmet dependencies:", + " bar: Conflicts: foo but 1.0 is to be installed", + " bar: Breaks: foo but 1.0 is to be installed", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def test_get_unmet_dependency_info_with_conflicts_marked_delete(self): """ @@ -1725,11 +2124,16 @@ deb_dir = self.makeDir() self._add_system_package("foo") self._add_package_to_deb_dir( - deb_dir, "bar", - control_fields={"Conflicts": "foo, baz", "Breaks": "foo, baz"}) + deb_dir, + "bar", + control_fields={"Conflicts": "foo, baz", "Breaks": "foo, baz"}, + ) self._add_system_package("baz") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") @@ -1741,12 +2145,17 @@ # removed by the resolver. foo.package.mark_keep() self.assertEqual( - set([bar.package]), self.facade._get_broken_packages()) - self.assertEqual( - ["The following packages have unmet dependencies:", - " bar: Conflicts: foo but 1.0 is to be installed", - " bar: Breaks: foo but 1.0 is to be installed"], - self.facade._get_unmet_dependency_info().splitlines()) + {bar.package}, + self.facade._get_broken_packages(), + ) + self.assertEqual( + [ + "The following packages have unmet dependencies:", + " bar: Conflicts: foo but 1.0 is to be installed", + " bar: Breaks: foo but 1.0 is to be installed", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def test_get_unmet_dependency_info_only_unmet(self): """ @@ -1758,21 +2167,31 @@ self._add_system_package("there1") self._add_system_package("there2") self._add_package_to_deb_dir( - deb_dir, "foo", - control_fields={"Depends": "there1, missing1, there2 | missing2"}) - self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + deb_dir, + "foo", + control_fields={"Depends": "there1, missing1, there2 | missing2"}, + ) + self.facade.add_channel_apt_deb( + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) with self.assertRaises(TransactionError): self.facade._preprocess_package_changes() self.assertEqual( - set([foo.package]), self.facade._get_broken_packages()) - self.assertEqual( - ["The following packages have unmet dependencies:", - " foo: Depends: missing1 but is not installable"], - self.facade._get_unmet_dependency_info().splitlines()) + {foo.package}, + self.facade._get_broken_packages(), + ) + self.assertEqual( + [ + "The following packages have unmet dependencies:", + " foo: Depends: missing1 but is not installable", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def test_get_unmet_dependency_info_multiple_broken(self): """ @@ -1781,11 +2200,20 @@ """ deb_dir = self.makeDir() self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Depends": "bar"}) - self._add_package_to_deb_dir( - deb_dir, "another-foo", control_fields={"Depends": "another-bar"}) - self.facade.add_channel_apt_deb( - 
"file://%s" % deb_dir, "./", trusted=True) + deb_dir, + "foo", + control_fields={"Depends": "bar"}, + ) + self._add_package_to_deb_dir( + deb_dir, + "another-foo", + control_fields={"Depends": "another-bar"}, + ) + self.facade.add_channel_apt_deb( + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [another_foo] = self.facade.get_packages_by_name("another-foo") @@ -1794,13 +2222,17 @@ with self.assertRaises(TransactionError): self.facade._preprocess_package_changes() self.assertEqual( - set([foo.package, another_foo.package]), - self.facade._get_broken_packages()) - self.assertEqual( - ["The following packages have unmet dependencies:", - " another-foo: Depends: another-bar but is not installable", - " foo: Depends: bar but is not installable"], - self.facade._get_unmet_dependency_info().splitlines()) + {foo.package, another_foo.package}, + self.facade._get_broken_packages(), + ) + self.assertEqual( + [ + "The following packages have unmet dependencies:", + " another-foo: Depends: another-bar but is not installable", + " foo: Depends: bar but is not installable", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def test_get_unmet_dependency_info_unknown(self): """ @@ -1813,11 +2245,14 @@ self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade._version_installs.append(foo) - self.facade._get_broken_packages = lambda: set([foo.package]) + self.facade._get_broken_packages = lambda: {foo.package} self.assertEqual( - ["The following packages have unmet dependencies:", - " foo: Unknown dependency error"], - self.facade._get_unmet_dependency_info().splitlines()) + [ + "The following packages have unmet dependencies:", + " foo: Unknown dependency error", + ], + self.facade._get_unmet_dependency_info().splitlines(), + ) def _mock_output_restore(self): """ @@ -1851,7 +2286,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) @@ -1872,7 +2310,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) @@ -1901,7 +2342,10 @@ self._add_package_to_deb_dir(deb_dir, "bar", version="1.5") self._add_system_package("baz") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self._add_system_package("quux", version="1.0") self._add_system_package("wibble", version="1.0") self.facade.reload_channels() @@ -1929,10 +2373,16 @@ """ deb_dir = self.makeDir() self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Depends": "bar"}) + deb_dir, + "foo", + control_fields={"Depends": "bar"}, + ) self._add_package_to_deb_dir(deb_dir, "bar") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) @@ -1966,7 +2416,10 @@ self._add_system_package("foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="1.5") 
self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() foo_10 = sorted(self.facade.get_packages_by_name("foo"))[0] self.facade.mark_global_upgrade() @@ -1994,7 +2447,10 @@ self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() foo1, foo2 = sorted(self.facade.get_packages_by_name("foo")) self.assertEqual(foo2, foo1.package.candidate) @@ -2015,31 +2471,61 @@ apt_pkg.config.set("APT::Architectures::", "i386") deb_dir = self.makeDir() self._add_system_package( - "multi-arch", architecture="amd64", version="1.0", - control_fields={"Multi-Arch": "same"}) - self._add_system_package( - "multi-arch", architecture="i386", version="1.0", - control_fields={"Multi-Arch": "same"}) - self._add_system_package( - "single-arch", architecture="amd64", version="1.0") - self._add_package_to_deb_dir( - deb_dir, "multi-arch", architecture="amd64", version="2.0", - control_fields={"Multi-Arch": "same"}) - self._add_package_to_deb_dir( - deb_dir, "multi-arch", architecture="i386", version="2.0", - control_fields={"Multi-Arch": "same"}) - self._add_package_to_deb_dir( - deb_dir, "single-arch", architecture="amd64", version="2.0") - self._add_package_to_deb_dir( - deb_dir, "single-arch", architecture="i386", version="2.0") - self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + "multi-arch", + architecture="amd64", + version="1.0", + control_fields={"Multi-Arch": "same"}, + ) + self._add_system_package( + "multi-arch", + architecture="i386", + version="1.0", + control_fields={"Multi-Arch": "same"}, + ) + self._add_system_package( + "single-arch", + architecture="amd64", + version="1.0", + ) + self._add_package_to_deb_dir( + deb_dir, + "multi-arch", + architecture="amd64", + version="2.0", + control_fields={"Multi-Arch": "same"}, + ) + self._add_package_to_deb_dir( + deb_dir, + "multi-arch", + architecture="i386", + version="2.0", + control_fields={"Multi-Arch": "same"}, + ) + self._add_package_to_deb_dir( + deb_dir, + "single-arch", + architecture="amd64", + version="2.0", + ) + self._add_package_to_deb_dir( + deb_dir, + "single-arch", + architecture="i386", + version="2.0", + ) + self.facade.add_channel_apt_deb( + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() multi_arch1, multi_arch2 = sorted( - self.facade.get_packages_by_name("multi-arch")) + self.facade.get_packages_by_name("multi-arch"), + ) single_arch1, single_arch2 = sorted( - self.facade.get_packages_by_name("single-arch")) + self.facade.get_packages_by_name("single-arch"), + ) self.facade.mark_remove(multi_arch1) self.facade.mark_install(multi_arch2) self.facade.mark_remove(single_arch1) @@ -2048,11 +2534,16 @@ self.facade.perform_changes() changes = [ (pkg.name, pkg.candidate.version, pkg.marked_upgrade) - for pkg in self.facade._cache.get_changes()] + for pkg in self.facade._cache.get_changes() + ] self.assertEqual( - [("multi-arch", "2.0", True), ("multi-arch:i386", "2.0", True), - ("single-arch", "2.0", True)], - sorted(changes)) + [ + ("multi-arch", "2.0", True), + ("multi-arch:i386", "2.0", True), + ("single-arch", "2.0", True), + ], + sorted(changes), + ) def test_wb_mark_install_upgrade_non_main_arch_dependency_error(self): """ @@ -2067,35 +2558,57 @@ 
apt_pkg.config.set("APT::Architectures::", "i386") deb_dir = self.makeDir() self._add_system_package( - "multi-arch", architecture="amd64", version="1.0", - control_fields={"Multi-Arch": "same"}) - self._add_system_package( - "multi-arch", architecture="i386", version="1.0", - control_fields={"Multi-Arch": "same"}) - self._add_package_to_deb_dir( - deb_dir, "multi-arch", architecture="amd64", version="2.0", - control_fields={"Multi-Arch": "same"}) - self._add_package_to_deb_dir( - deb_dir, "multi-arch", architecture="i386", version="2.0", - control_fields={"Multi-Arch": "same"}) - self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + "multi-arch", + architecture="amd64", + version="1.0", + control_fields={"Multi-Arch": "same"}, + ) + self._add_system_package( + "multi-arch", + architecture="i386", + version="1.0", + control_fields={"Multi-Arch": "same"}, + ) + self._add_package_to_deb_dir( + deb_dir, + "multi-arch", + architecture="amd64", + version="2.0", + control_fields={"Multi-Arch": "same"}, + ) + self._add_package_to_deb_dir( + deb_dir, + "multi-arch", + architecture="i386", + version="2.0", + control_fields={"Multi-Arch": "same"}, + ) + self.facade.add_channel_apt_deb( + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() multi_arch1, multi_arch2 = sorted( - self.facade.get_packages_by_name("multi-arch")) + self.facade.get_packages_by_name("multi-arch"), + ) self.facade.mark_global_upgrade() self.patch_cache_commit() with self.assertRaises(DependencyError) as cm: self.facade.perform_changes() self.assertEqual( - sorted([multi_arch1, multi_arch2]), sorted(cm.exception.packages)) + sorted([multi_arch1, multi_arch2]), + sorted(cm.exception.packages), + ) changes = [ (pkg.name, pkg.candidate.version) - for pkg in self.facade._cache.get_changes()] + for pkg in self.facade._cache.get_changes() + ] self.assertEqual( [("multi-arch", "2.0"), ("multi-arch:i386", "2.0")], - sorted(changes)) + sorted(changes), + ) def test_mark_global_upgrade(self): """ @@ -2110,13 +2623,16 @@ self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self._add_package_to_deb_dir(deb_dir, "baz") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() foo1, foo2 = sorted(self.facade.get_packages_by_name("foo")) self.facade.mark_global_upgrade() with self.assertRaises(DependencyError) as cm: self.facade.perform_changes() - self.assertEqual(set([foo1, foo2]), set(cm.exception.packages)) + self.assertEqual({foo1, foo2}, set(cm.exception.packages)) def test_mark_global_upgrade_candidate_version(self): """ @@ -2131,14 +2647,17 @@ self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self._add_package_to_deb_dir(deb_dir, "foo", version="3.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() foo1, foo2, foo3 = sorted(self.facade.get_packages_by_name("foo")) self.assertEqual(foo3, foo1.package.candidate) self.facade.mark_global_upgrade() with self.assertRaises(DependencyError) as cm: self.facade.perform_changes() - self.assertEqual(set([foo1, foo3]), set(cm.exception.packages)) + self.assertEqual({foo1, foo3}, set(cm.exception.packages)) def test_mark_global_upgrade_no_upgrade(self): """ @@ -2151,7 +2670,10 @@ self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") 
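# Illustrative aside (not part of the diff): the sorted(...) calls
# and candidate checks in these tests rest on Debian version
# ordering, which python-apt (assumed installed) exposes as
# apt_pkg.version_compare():
import apt_pkg

apt_pkg.init()
assert apt_pkg.version_compare("1.0", "2.0") < 0  # 1.0 sorts before 2.0
assert apt_pkg.version_compare("3.0", "2.0") > 0  # 3.0 wins as candidate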
self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() foo3 = sorted(self.facade.get_packages_by_name("foo"))[-1] self.assertEqual(foo3, foo3.package.candidate) @@ -2168,7 +2690,10 @@ self._add_system_package("noauto", version="1.0") self._add_package_to_deb_dir(deb_dir, "noauto", version="2.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() auto1, auto2 = sorted(self.facade.get_packages_by_name("auto")) noauto1, noauto2 = sorted(self.facade.get_packages_by_name("noauto")) @@ -2225,18 +2750,22 @@ """ deb_dir = self.makeDir() self._add_system_package( - "broken", control_fields={"Depends": "missing"}) + "broken", + control_fields={"Depends": "missing"}, + ) self._add_package_to_deb_dir(deb_dir, "foo") self._add_package_to_deb_dir(deb_dir, "missing") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) self.patch_cache_commit() self.assertEqual("", self.facade.perform_changes()) - self.assertEqual( - [foo.package], self.facade._cache.get_changes()) + self.assertEqual([foo.package], self.facade._cache.get_changes()) def test_perform_changes_with_broken_packages_install_deps(self): """ @@ -2246,13 +2775,21 @@ """ deb_dir = self.makeDir() self._add_system_package( - "broken", control_fields={"Depends": "missing"}) - self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Depends": "bar"}) + "broken", + control_fields={"Depends": "missing"}, + ) + self._add_package_to_deb_dir( + deb_dir, + "foo", + control_fields={"Depends": "bar"}, + ) self._add_package_to_deb_dir(deb_dir, "bar") self._add_package_to_deb_dir(deb_dir, "missing") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") @@ -2269,18 +2806,22 @@ """ deb_dir = self.makeDir() self._add_system_package( - "broken", control_fields={"Depends": "missing"}) + "broken", + control_fields={"Depends": "missing"}, + ) self._add_system_package("foo") self._add_package_to_deb_dir(deb_dir, "missing") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_remove(foo) self.patch_cache_commit() self.assertEqual("", self.facade.perform_changes()) - self.assertEqual( - [foo.package], self.facade._cache.get_changes()) + self.assertEqual([foo.package], self.facade._cache.get_changes()) def test_perform_changes_with_broken_packages_install_broken(self): """ @@ -2294,18 +2835,28 @@ """ deb_dir = self.makeDir() self._add_system_package( - "broken", control_fields={"Depends": "missing"}) - self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Depends": "really-missing"}) + "broken", + control_fields={"Depends": "missing"}, + ) + self._add_package_to_deb_dir( + deb_dir, + "foo", + control_fields={"Depends": "really-missing"}, + ) self._add_package_to_deb_dir(deb_dir, "missing") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + 
trusted=True, + ) self.facade.reload_channels() [broken] = self.facade.get_packages_by_name("broken") [foo] = self.facade.get_packages_by_name("foo") [missing] = self.facade.get_packages_by_name("missing") self.assertEqual( - set([broken.package]), self.facade._get_broken_packages()) + {broken.package}, + self.facade._get_broken_packages(), + ) self.facade.mark_install(foo) self.facade.mark_install(missing) self.patch_cache_commit() @@ -2313,9 +2864,12 @@ self.facade.perform_changes() self.assertIn( "The following packages have unmet dependencies", - cm.exception.args[0]) + cm.exception.args[0], + ) self.assertEqual( - set([foo.package]), self.facade._get_broken_packages()) + {foo.package}, + self.facade._get_broken_packages(), + ) def test_wb_perform_changes_commit_error(self): """ @@ -2332,7 +2886,9 @@ with self.assertRaises(TransactionError) as cm: self.facade.perform_changes() mock_commit.assert_called_with( - fetch_progress=mock.ANY, install_progress=mock.ANY) + fetch_progress=mock.ANY, + install_progress=mock.ANY, + ) self.assertIn("Something went wrong.", cm.exception.args[0]) def test_mark_install_transaction_error(self): @@ -2351,12 +2907,15 @@ with self.assertRaises(TransactionError) as cm: self.facade.perform_changes() self.assertEqual( - ["The following packages have unmet dependencies:", - " name1: PreDepends: prerequirename1 (= prerequireversion1)" + - " but is not installable", - " name1: Depends: requirename1 (= requireversion1) but is not" + - " installable"], - cm.exception.args[0].splitlines()[-3:]) + [ + "The following packages have unmet dependencies:", + " name1: PreDepends: prerequirename1 (= prerequireversion1)" + + " but is not installable", + " name1: Depends: requirename1 (= requireversion1) but is not" + + " installable", + ], + cm.exception.args[0].splitlines()[-3:], + ) def test_mark_install_dependency_error(self): """ @@ -2365,10 +2924,16 @@ """ deb_dir = self.makeDir() self._add_package_to_deb_dir( - deb_dir, "foo", control_fields={"Depends": "bar"}) + deb_dir, + "foo", + control_fields={"Depends": "bar"}, + ) self._add_package_to_deb_dir(deb_dir, "bar") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") @@ -2387,7 +2952,10 @@ self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo1, foo2] = sorted(self.facade.get_packages_by_name("foo")) self.assertEqual(foo1.package, foo2.package) @@ -2412,7 +2980,10 @@ self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo1, foo2] = sorted(self.facade.get_packages_by_name("foo")) self.assertEqual(foo1.package, foo2.package) @@ -2455,7 +3026,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo1, foo2] = sorted(self.facade.get_packages_by_name("foo")) 
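# Illustrative aside (not part of the diff): two more recurring
# modernizations in this diff, side by side -- set literals replacing
# set([...]), and explicit parentheses on single-element unpacking:
assert set(["a", "b"]) == {"a", "b"}  # same set, literal syntax
(only,) = ["x"]  # equivalent to the older spelling: only, = ["x"]
assert only == "x"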
self.assertEqual(foo1.package, foo2.package) @@ -2479,7 +3053,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo1, foo2] = sorted(self.facade.get_packages_by_name("foo")) self.assertEqual(foo1.package, foo2.package) @@ -2491,7 +3068,7 @@ with self.assertRaises(DependencyError) as cm: self.facade._check_changes([]) - self.assertEqual(set([foo1, foo2]), set(cm.exception.packages)) + self.assertEqual({foo1, foo2}, set(cm.exception.packages)) def test_check_changes_unapproved_downgrade(self): """ @@ -2504,7 +3081,10 @@ self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="3.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo1, foo2] = sorted(self.facade.get_packages_by_name("foo"))[:2] self.assertEqual(foo1.package, foo2.package) @@ -2517,7 +3097,7 @@ with self.assertRaises(DependencyError) as cm: self.facade._check_changes([]) - self.assertEqual(set([foo1, foo2]), set(cm.exception.packages)) + self.assertEqual({foo1, foo2}, set(cm.exception.packages)) def test_mark_global_upgrade_dependency_error(self): """ @@ -2528,10 +3108,17 @@ deb_dir = self.makeDir() self._add_system_package("foo", version="1.0") self._add_package_to_deb_dir( - deb_dir, "foo", version="1.5", control_fields={"Depends": "bar"}) + deb_dir, + "foo", + version="1.5", + control_fields={"Depends": "bar"}, + ) self._add_package_to_deb_dir(deb_dir, "bar") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() foo_10, foo_15 = sorted(self.facade.get_packages_by_name("foo")) [bar] = self.facade.get_packages_by_name("bar") @@ -2540,7 +3127,8 @@ self.facade.perform_changes() self.assertEqual( sorted([bar, foo_10, foo_15], key=self.version_sortkey), - sorted(cm.exception.packages, key=self.version_sortkey)) + sorted(cm.exception.packages, key=self.version_sortkey), + ) def test_mark_remove_dependency_error(self): """ @@ -2563,9 +3151,13 @@ C{TransactionError} is raised by C{perform_changes}. """ self._add_system_package( - "foo", control_fields={"Status": "hold ok installed"}) - self._add_system_package( - "bar", control_fields={"Status": "hold ok installed"}) + "foo", + control_fields={"Status": "hold ok installed"}, + ) + self._add_system_package( + "bar", + control_fields={"Status": "hold ok installed"}, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") @@ -2574,8 +3166,10 @@ with self.assertRaises(TransactionError) as cm: self.facade.perform_changes() self.assertEqual( - "Can't perform the changes, since the following packages" + - " are held: bar, foo", cm.exception.args[0]) + "Can't perform the changes, since the following packages" + + " are held: bar, foo", + cm.exception.args[0], + ) def test_changer_upgrade_package(self): """ @@ -2586,13 +3180,15 @@ removal, since that can result in packages that depend on the upgraded package to be removed. 
""" - self._add_system_package( - "foo", control_fields={"Depends": "bar"}) + self._add_system_package("foo", control_fields={"Depends": "bar"}) self._add_system_package("bar", version="1.0") deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "bar", version="2.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() bar_1, bar_2 = sorted(self.facade.get_packages_by_name("bar")) self.facade.mark_install(bar_2) @@ -2607,13 +3203,15 @@ An upgrade request should preserve an existing auto flag on the upgraded package. """ - self._add_system_package( - "foo", control_fields={"Depends": "bar"}) + self._add_system_package("foo", control_fields={"Depends": "bar"}) self._add_system_package("bar", version="1.0") deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "bar", version="2.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() bar_1, bar_2 = sorted(self.facade.get_packages_by_name("bar")) bar_1.package.mark_auto() @@ -2631,13 +3229,15 @@ An upgrade request should mark a package as manual if the installed version is manual. """ - self._add_system_package( - "foo", control_fields={"Depends": "bar"}) + self._add_system_package("foo", control_fields={"Depends": "bar"}) self._add_system_package("bar", version="1.0") deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "bar", version="2.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() bar_1, bar_2 = sorted(self.facade.get_packages_by_name("bar")) @@ -2657,9 +3257,12 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "bar", version="2.0") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() - bar_2, = self.facade.get_packages_by_name("bar") + (bar_2,) = self.facade.get_packages_by_name("bar") self.facade.mark_install(bar_2) self.patch_cache_commit() @@ -2675,12 +3278,17 @@ that package. """ self._add_system_package( - "foo", version="1.0", - control_fields={"Status": "hold ok installed"}) + "foo", + version="1.0", + control_fields={"Status": "hold ok installed"}, + ) deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="1.5") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo_10, foo_15] = sorted(self.facade.get_packages_by_name("foo")) self.facade.mark_global_upgrade() @@ -2694,18 +3302,28 @@ marked as requiring upgrade. 
""" self._add_system_package( - "foo", version="1.0", - control_fields={"Status": "hold ok installed"}) - self._add_system_package( - "bar", version="1.0", - control_fields={"Depends": "foo"}) + "foo", + version="1.0", + control_fields={"Status": "hold ok installed"}, + ) + self._add_system_package( + "bar", + version="1.0", + control_fields={"Depends": "foo"}, + ) deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self._add_package_to_deb_dir( - deb_dir, "bar", version="2.0", - control_fields={"Depends": "foo (>> 1.0)"}) - self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + deb_dir, + "bar", + version="2.0", + control_fields={"Depends": "foo (>> 1.0)"}, + ) + self.facade.add_channel_apt_deb( + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [foo_1, foo_2] = sorted(self.facade.get_packages_by_name("foo")) [bar_1, bar_2] = sorted(self.facade.get_packages_by_name("bar")) @@ -2724,7 +3342,9 @@ those packages to a newer version. """ self._add_system_package( - "foo", control_fields={"Status": "hold ok installed"}) + "foo", + control_fields={"Status": "hold ok installed"}, + ) self._add_system_package("bar") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") @@ -2736,14 +3356,19 @@ held package. """ self._add_system_package( - "foo", version="1.0", - control_fields={"Status": "hold ok installed"}) + "foo", + version="1.0", + control_fields={"Status": "hold ok installed"}, + ) self._add_system_package("bar", version="1.0") deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="1.5") self._add_package_to_deb_dir(deb_dir, "bar", version="1.5") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() foo_10 = sorted(self.facade.get_packages_by_name("foo"))[0] self.assertEqual([foo_10], self.facade.get_locked_packages()) @@ -2757,9 +3382,15 @@ """ self._add_system_package("foo", version="1.0") self._add_system_package( - "bar", version="1.0", control_fields={"Depends": "foo"}) - self._add_system_package( - "baz", version="1.0", control_fields={"Depends": "foo"}) + "bar", + version="1.0", + control_fields={"Depends": "foo"}, + ) + self._add_system_package( + "baz", + version="1.0", + control_fields={"Depends": "foo"}, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") @@ -2770,7 +3401,8 @@ self.assertEqual( sorted(cm.exception.packages, key=self.version_sortkey), - sorted([bar, baz], key=self.version_sortkey)) + sorted([bar, baz], key=self.version_sortkey), + ) def test_get_package_holds_with_no_hold(self): """ @@ -2787,16 +3419,22 @@ the name of the packages that are held. 
""" self._add_system_package( - "foo", control_fields={"Status": "hold ok installed"}) + "foo", + control_fields={"Status": "hold ok installed"}, + ) self._add_system_package("bar") self._add_system_package( - "baz", control_fields={"Status": "hold ok installed"}) + "baz", + control_fields={"Status": "hold ok installed"}, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [baz] = self.facade.get_packages_by_name("baz") self.assertEqual( - ["baz", "foo"], sorted(self.facade.get_package_holds())) + ["baz", "foo"], + sorted(self.facade.get_package_holds()), + ) def test_mark_hold_and_perform_hold_changes(self): """ @@ -2807,8 +3445,10 @@ self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_hold(foo) - self.assertEqual("Package holds successfully changed.", - self.facade._perform_hold_changes()) + self.assertEqual( + "Package holds successfully changed.", + self.facade._perform_hold_changes(), + ) self.facade.reload_channels() self.assertEqual(["foo"], self.facade.get_package_holds()) @@ -2845,7 +3485,9 @@ C{perform_changes} won't return an error. """ self._add_system_package( - "foo", control_fields={"Status": "hold ok installed"}) + "foo", + control_fields={"Status": "hold ok installed"}, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_hold(foo) @@ -2859,7 +3501,9 @@ C{mark_remove_hold} marks a package as not held. """ self._add_system_package( - "foo", control_fields={"Status": "hold ok installed"}) + "foo", + control_fields={"Status": "hold ok installed"}, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_remove_hold(foo) @@ -2878,7 +3522,10 @@ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "bar") self.facade.add_channel_apt_deb( - "file://%s" % deb_dir, "./", trusted=True) + f"file://{deb_dir}", + "./", + trusted=True, + ) self.facade.reload_channels() [bar] = self.facade.get_packages_by_name("bar") self.facade.mark_remove_hold(bar) @@ -2893,7 +3540,9 @@ C{mark_remove_hold} and C{perform_changes} are called. """ self._add_system_package( - "foo", control_fields={"Status": "deinstall ok installed"}) + "foo", + control_fields={"Status": "deinstall ok installed"}, + ) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_remove_hold(foo) @@ -2903,7 +3552,9 @@ self.assertEqual([], self.facade.get_package_holds()) [foo] = self.facade.get_packages_by_name("foo") self.assertEqual( - apt_pkg.SELSTATE_DEINSTALL, foo.package._pkg.selected_state) + apt_pkg.SELSTATE_DEINSTALL, + foo.package._pkg.selected_state, + ) def test_creation_of_key_ring(self): """ @@ -2919,7 +3570,8 @@ # multi-arch support isn't available. 
skip_message = "multi-arch not supported" test_wb_mark_install_upgrade_non_main_arch_dependency_error.skip = ( - skip_message) + skip_message + ) test_wb_mark_install_upgrade_non_main_arch.skip = skip_message if apt_pkg.VERSION.startswith("0.7.25"): diff -Nru landscape-client-23.02/landscape/lib/apt/package/tests/test_skeleton.py landscape-client-23.08/landscape/lib/apt/package/tests/test_skeleton.py --- landscape-client-23.02/landscape/lib/apt/package/tests/test_skeleton.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/apt/package/tests/test_skeleton.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,47 +1,84 @@ import locale import unittest -from landscape.lib import testing -from landscape.lib.apt.package.testing import ( - AptFacadeHelper, HASH1, create_simple_repository, create_deb, - PKGNAME_MINIMAL, PKGDEB_MINIMAL, HASH_MINIMAL, PKGNAME_SIMPLE_RELATIONS, - PKGDEB_SIMPLE_RELATIONS, HASH_SIMPLE_RELATIONS, PKGNAME_VERSION_RELATIONS, - PKGDEB_VERSION_RELATIONS, HASH_VERSION_RELATIONS, - PKGNAME_MULTIPLE_RELATIONS, PKGDEB_MULTIPLE_RELATIONS, - HASH_MULTIPLE_RELATIONS, PKGNAME_OR_RELATIONS, PKGDEB_OR_RELATIONS, - HASH_OR_RELATIONS) -from landscape.lib.apt.package.skeleton import ( - build_skeleton_apt, DEB_PROVIDES, DEB_PACKAGE, - DEB_NAME_PROVIDES, DEB_REQUIRES, DEB_OR_REQUIRES, DEB_UPGRADES, - DEB_CONFLICTS, PackageSkeleton) - from twisted.python.compat import unicode +from landscape.lib import testing +from landscape.lib.apt.package.skeleton import build_skeleton_apt +from landscape.lib.apt.package.skeleton import DEB_CONFLICTS +from landscape.lib.apt.package.skeleton import DEB_NAME_PROVIDES +from landscape.lib.apt.package.skeleton import DEB_OR_REQUIRES +from landscape.lib.apt.package.skeleton import DEB_PACKAGE +from landscape.lib.apt.package.skeleton import DEB_PROVIDES +from landscape.lib.apt.package.skeleton import DEB_REQUIRES +from landscape.lib.apt.package.skeleton import DEB_UPGRADES +from landscape.lib.apt.package.skeleton import PackageSkeleton +from landscape.lib.apt.package.testing import AptFacadeHelper +from landscape.lib.apt.package.testing import create_deb +from landscape.lib.apt.package.testing import create_simple_repository +from landscape.lib.apt.package.testing import HASH1 +from landscape.lib.apt.package.testing import HASH_MINIMAL +from landscape.lib.apt.package.testing import HASH_MULTIPLE_RELATIONS +from landscape.lib.apt.package.testing import HASH_OR_RELATIONS +from landscape.lib.apt.package.testing import HASH_SIMPLE_RELATIONS +from landscape.lib.apt.package.testing import HASH_VERSION_RELATIONS +from landscape.lib.apt.package.testing import PKGDEB_BROKEN_DESCRIPTION +from landscape.lib.apt.package.testing import PKGDEB_MINIMAL +from landscape.lib.apt.package.testing import PKGDEB_MULTIPLE_RELATIONS +from landscape.lib.apt.package.testing import PKGDEB_OR_RELATIONS +from landscape.lib.apt.package.testing import PKGDEB_SIMPLE_RELATIONS +from landscape.lib.apt.package.testing import PKGDEB_VERSION_RELATIONS +from landscape.lib.apt.package.testing import PKGNAME_BROKEN_DESCRIPTION +from landscape.lib.apt.package.testing import PKGNAME_MINIMAL +from landscape.lib.apt.package.testing import PKGNAME_MULTIPLE_RELATIONS +from landscape.lib.apt.package.testing import PKGNAME_OR_RELATIONS +from landscape.lib.apt.package.testing import PKGNAME_SIMPLE_RELATIONS +from landscape.lib.apt.package.testing import PKGNAME_VERSION_RELATIONS -class SkeletonTestHelper(object): + +class SkeletonTestHelper: """A helper to set up a repository for the 
skeleton tests.""" def set_up(self, test_case): test_case.skeleton_repository_dir = test_case.makeDir() create_simple_repository(test_case.skeleton_repository_dir) create_deb( - test_case.skeleton_repository_dir, PKGNAME_MINIMAL, PKGDEB_MINIMAL) + test_case.skeleton_repository_dir, + PKGNAME_MINIMAL, + PKGDEB_MINIMAL, + ) + create_deb( + test_case.skeleton_repository_dir, + PKGNAME_SIMPLE_RELATIONS, + PKGDEB_SIMPLE_RELATIONS, + ) create_deb( - test_case.skeleton_repository_dir, PKGNAME_SIMPLE_RELATIONS, - PKGDEB_SIMPLE_RELATIONS) + test_case.skeleton_repository_dir, + PKGNAME_VERSION_RELATIONS, + PKGDEB_VERSION_RELATIONS, + ) create_deb( - test_case.skeleton_repository_dir, PKGNAME_VERSION_RELATIONS, - PKGDEB_VERSION_RELATIONS) + test_case.skeleton_repository_dir, + PKGNAME_MULTIPLE_RELATIONS, + PKGDEB_MULTIPLE_RELATIONS, + ) create_deb( - test_case.skeleton_repository_dir, PKGNAME_MULTIPLE_RELATIONS, - PKGDEB_MULTIPLE_RELATIONS) + test_case.skeleton_repository_dir, + PKGNAME_OR_RELATIONS, + PKGDEB_OR_RELATIONS, + ) create_deb( - test_case.skeleton_repository_dir, PKGNAME_OR_RELATIONS, - PKGDEB_OR_RELATIONS) + test_case.skeleton_repository_dir, + PKGNAME_BROKEN_DESCRIPTION, + PKGDEB_BROKEN_DESCRIPTION, + ) -class BaseTestCase(testing.HelperTestCase, testing.FSTestCase, - unittest.TestCase): +class BaseTestCase( + testing.HelperTestCase, + testing.FSTestCase, + unittest.TestCase, +): pass @@ -51,7 +88,7 @@ helpers = [AptFacadeHelper, SkeletonTestHelper] def setUp(self): - super(SkeletonAptTest, self).setUp() + super().setUp() self.facade.add_channel_deb_dir(self.skeleton_repository_dir) # Don't use reload_channels(), since that causes the test setup # depending on build_skeleton_apt working correctly, which makes @@ -89,7 +126,8 @@ (DEB_REQUIRES, "prerequirename1 = prerequireversion1"), (DEB_REQUIRES, "requirename1 = requireversion1"), (DEB_UPGRADES, "name1 < version1-release1"), - (DEB_CONFLICTS, "conflictsname1 = conflictsversion1")] + (DEB_CONFLICTS, "conflictsname1 = conflictsversion1"), + ] self.assertEqual(relations, skeleton.relations) self.assertEqual(HASH1, skeleton.get_hash(), HASH1) @@ -116,6 +154,13 @@ self.assertTrue(isinstance(skeleton.version, unicode)) self.assertEqual(HASH1, skeleton.get_hash()) + def test_build_skeleton_with_broken_description(self): + """ + A package that has only the required fields and a broken description. 
+ """ + brokendescription_package = self.get_package("brokendescription") + build_skeleton_apt(brokendescription_package) + def test_build_skeleton_with_info(self): """ If C{with_info} is passed to build_skeleton_apt, C{section}, @@ -158,12 +203,15 @@ self.addCleanup(locale.resetlocale) self._add_package_to_deb_dir( - self.skeleton_repository_dir, "pkg", description=u"T\xe9st") + self.skeleton_repository_dir, + "pkg", + description="T\xe9st", + ) self.facade._cache.update(None) self.facade._cache.open(None) pkg = self.get_package("pkg") skeleton = build_skeleton_apt(pkg, with_unicode=True, with_info=True) - self.assertEqual(u"T\u00E9st", skeleton.description) + self.assertEqual("T\u00E9st", skeleton.description) def test_build_skeleton_minimal(self): """ @@ -181,7 +229,8 @@ self.assertEqual(None, skeleton.installed_size) relations = [ (DEB_NAME_PROVIDES, "minimal = 1.0"), - (DEB_UPGRADES, "minimal < 1.0")] + (DEB_UPGRADES, "minimal < 1.0"), + ] self.assertEqual(relations, skeleton.relations) self.assertEqual(HASH_MINIMAL, skeleton.get_hash()) @@ -195,7 +244,8 @@ self.assertEqual("", skeleton.section) self.assertEqual( "A minimal package with no dependencies or other relations.", - skeleton.summary) + skeleton.summary, + ) self.assertEqual("", skeleton.description) self.assertEqual(558, skeleton.size) self.assertEqual(None, skeleton.installed_size) @@ -216,7 +266,8 @@ (DEB_REQUIRES, "predepend1"), (DEB_UPGRADES, "simple-relations < 1.0"), (DEB_CONFLICTS, "break1"), - (DEB_CONFLICTS, "conflict1")] + (DEB_CONFLICTS, "conflict1"), + ] self.assertEqual(relations, skeleton.relations) self.assertEqual(HASH_SIMPLE_RELATIONS, skeleton.get_hash()) @@ -236,7 +287,8 @@ (DEB_REQUIRES, "predepend1 <= 2.0"), (DEB_UPGRADES, "version-relations < 1.0"), (DEB_CONFLICTS, "break1 > 2.0"), - (DEB_CONFLICTS, "conflict1 < 2.0")] + (DEB_CONFLICTS, "conflict1 < 2.0"), + ] self.assertEqual(relations, skeleton.relations) self.assertEqual(HASH_VERSION_RELATIONS, skeleton.get_hash()) @@ -263,7 +315,8 @@ (DEB_CONFLICTS, "break1 > 2.0"), (DEB_CONFLICTS, "break2"), (DEB_CONFLICTS, "conflict1 < 2.0"), - (DEB_CONFLICTS, "conflict2")] + (DEB_CONFLICTS, "conflict2"), + ] self.assertEqual(relations, skeleton.relations) self.assertEqual(HASH_MULTIPLE_RELATIONS, skeleton.get_hash()) @@ -280,13 +333,13 @@ (DEB_NAME_PROVIDES, "or-relations = 1.0"), (DEB_OR_REQUIRES, "depend1 = 2.0 | depend2"), (DEB_OR_REQUIRES, "predepend1 <= 2.0 | predepend2"), - (DEB_UPGRADES, "or-relations < 1.0")] + (DEB_UPGRADES, "or-relations < 1.0"), + ] self.assertEqual(relations, skeleton.relations) self.assertEqual(HASH_OR_RELATIONS, skeleton.get_hash()) class SkeletonTest(BaseTestCase): - def test_skeleton_set_hash(self): """ If the hash is explictly set using C{set_hash}, C{get_hash} diff -Nru landscape-client-23.02/landscape/lib/apt/package/tests/test_store.py landscape-client-23.08/landscape/lib/apt/package/tests/test_store.py --- landscape-client-23.02/landscape/lib/apt/package/tests/test_store.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/apt/package/tests/test_store.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,14 @@ -import mock import sqlite3 import threading import time import unittest +from unittest import mock from landscape.lib import testing -from landscape.lib.apt.package.store import ( - HashIdStore, PackageStore, UnknownHashIDRequest, InvalidHashIdDb) +from landscape.lib.apt.package.store import HashIdStore +from landscape.lib.apt.package.store import InvalidHashIdDb +from 
landscape.lib.apt.package.store import PackageStore +from landscape.lib.apt.package.store import UnknownHashIDRequest class BaseTestCase(testing.FSTestCase, unittest.TestCase): @@ -14,9 +16,8 @@ class HashIdStoreTest(BaseTestCase): - def setUp(self): - super(HashIdStoreTest, self).setUp() + super().setUp() self.filename = self.makeFile() self.store1 = HashIdStore(self.filename) @@ -49,7 +50,7 @@ db = sqlite3.connect(self.store1._filename) commits = [] - class FakeDb(object): + class FakeDb: def __getattr__(self, name): if name == "commit": return self.commit @@ -70,7 +71,7 @@ db = sqlite3.connect(self.store1._filename) rollbacks = [] - class FakeDb(object): + class FakeDb: def __getattr__(self, name): if name == "rollback": return self.rollback @@ -117,8 +118,10 @@ store_filename = self.makeFile() db = sqlite3.connect(store_filename) cursor = db.cursor() - cursor.execute("CREATE TABLE hash" - " (junk INTEGER PRIMARY KEY, hash BLOB UNIQUE)") + cursor.execute( + "CREATE TABLE hash" + " (junk INTEGER PRIMARY KEY, hash BLOB UNIQUE)", + ) cursor.close() db.commit() @@ -127,9 +130,8 @@ class PackageStoreTest(BaseTestCase): - def setUp(self): - super(PackageStoreTest, self).setUp() + super().setUp() self.filename = self.makeFile() self.store1 = PackageStore(self.filename) @@ -146,7 +148,6 @@ self.assertTrue(self.store1.has_hash_id_db()) def test_add_hash_id_db_with_non_sqlite_file(self): - def junk_db_factory(): filename = self.makeFile() open(filename, "w").write("junk") @@ -165,19 +166,23 @@ self.assertFalse(self.store1.has_hash_id_db()) def test_add_hash_id_db_with_wrong_schema(self): - def non_compliant_db_factory(): filename = self.makeFile() db = sqlite3.connect(filename) cursor = db.cursor() - cursor.execute("CREATE TABLE hash" - " (junk INTEGER PRIMARY KEY, hash BLOB UNIQUE)") + cursor.execute( + "CREATE TABLE hash" + " (junk INTEGER PRIMARY KEY, hash BLOB UNIQUE)", + ) cursor.close() db.commit() return filename - self.assertRaises(InvalidHashIdDb, self.store1.add_hash_id_db, - non_compliant_db_factory()) + self.assertRaises( + InvalidHashIdDb, + self.store1.add_hash_id_db, + non_compliant_db_factory(), + ) self.assertFalse(self.store1.has_hash_id_db()) def hash_id_db_factory(self, hash_ids): @@ -195,10 +200,12 @@ self.store1.set_hash_ids({b"hash1": 1}) # Add a couple of hash=>id dbs - self.store1.add_hash_id_db(self.hash_id_db_factory({b"hash1": 2, - b"hash2": 3})) - self.store1.add_hash_id_db(self.hash_id_db_factory({b"hash2": 4, - b"ha\x00sh1": 5})) + self.store1.add_hash_id_db( + self.hash_id_db_factory({b"hash1": 2, b"hash2": 3}), + ) + self.store1.add_hash_id_db( + self.hash_id_db_factory({b"hash2": 4, b"ha\x00sh1": 5}), + ) # Check look-up priorities and binary hashes self.assertEqual(self.store1.get_hash_id(b"hash1"), 2) @@ -212,8 +219,9 @@ the desired mapping is not found. 
""" self.store1.add_hash_id_db(self.hash_id_db_factory({b"hash1": 123})) - self.store1.add_hash_id_db(self.hash_id_db_factory({b"hash1": 999, - b"hash2": 456})) + self.store1.add_hash_id_db( + self.hash_id_db_factory({b"hash1": 999, b"hash2": 456}), + ) self.store1.set_hash_ids({b"hash3": 789}) self.assertEqual(self.store1.get_id_hash(123), b"hash1") self.assertEqual(self.store1.get_id_hash(456), b"hash2") @@ -253,9 +261,10 @@ """Adding 20k ids must take less than 5 seconds.""" started = time.time() self.store1.add_available_upgrades(range(20000)) - self.assertTrue(time.time() - started < 5, - "Adding 20k available upgrades ids took " - "more than 5 seconds.") + self.assertTrue( + time.time() - started < 5, + "Adding 20k available upgrades ids took " "more than 5 seconds.", + ) def test_remove_available_upgrades(self): self.store1.add_available_upgrades([1, 2, 3, 4]) @@ -266,9 +275,10 @@ self.store1.add_available_upgrades(range(20000)) started = time.time() self.store1.remove_available_upgrades(range(20000)) - self.assertTrue(time.time() - started < 5, - "Removing 20k available upgrades ids took " - "more than 5 seconds.") + self.assertTrue( + time.time() - started < 5, + "Removing 20k available upgrades ids took " "more than 5 seconds.", + ) def test_clear_available_upgrades(self): self.store1.add_available_upgrades([1, 2, 3, 4]) @@ -306,8 +316,10 @@ """Adding 20k ids must take less than 5 seconds.""" started = time.time() self.store1.add_installed(range(20000)) - self.assertTrue(time.time() - started < 5, - "Adding 20k installed ids took more than 5 seconds.") + self.assertTrue( + time.time() - started < 5, + "Adding 20k installed ids took more than 5 seconds.", + ) def test_remove_installed(self): self.store1.add_installed([1, 2, 3, 4]) @@ -318,8 +330,10 @@ self.store1.add_installed(range(20000)) started = time.time() self.store1.remove_installed(range(20000)) - self.assertTrue(time.time() - started < 5, - "Removing 20k installed ids took more than 5 seconds.") + self.assertTrue( + time.time() - started < 5, + "Removing 20k installed ids took more than 5 seconds.", + ) def test_clear_installed(self): self.store1.add_installed([1, 2, 3, 4]) @@ -334,18 +348,21 @@ filename = self.makeFile() database = sqlite3.connect(filename) cursor = database.cursor() - cursor.execute("CREATE TABLE available" - " (id INTEGER PRIMARY KEY)") - cursor.execute("CREATE TABLE available_upgrade" - " (id INTEGER PRIMARY KEY)") - cursor.execute("CREATE TABLE installed" - " (id INTEGER PRIMARY KEY)") - cursor.execute("CREATE TABLE hash_id_request" - " (id INTEGER PRIMARY KEY, timestamp TIMESTAMP," - " message_id INTEGER, hashes BLOB)") - cursor.execute("CREATE TABLE task" - " (id INTEGER PRIMARY KEY, queue TEXT," - " timestamp TIMESTAMP, data BLOB)") + cursor.execute("CREATE TABLE available" " (id INTEGER PRIMARY KEY)") + cursor.execute( + "CREATE TABLE available_upgrade" " (id INTEGER PRIMARY KEY)", + ) + cursor.execute("CREATE TABLE installed" " (id INTEGER PRIMARY KEY)") + cursor.execute( + "CREATE TABLE hash_id_request" + " (id INTEGER PRIMARY KEY, timestamp TIMESTAMP," + " message_id INTEGER, hashes BLOB)", + ) + cursor.execute( + "CREATE TABLE task" + " (id INTEGER PRIMARY KEY, queue TEXT," + " timestamp TIMESTAMP, data BLOB)", + ) cursor.close() database.commit() database.close() @@ -414,8 +431,11 @@ hashes2 = ["ha\x00sh3", "ha\x00sh4"] self.store1.add_hash_id_request(hashes1) self.store1.add_hash_id_request(hashes2) - hashes = [hash for request in self.store2.iter_hash_id_requests() - for hash in 
request.hashes] + hashes = [ + hash + for request in self.store2.iter_hash_id_requests() + for hash in request.hashes + ] self.assertEqual(hashes, hashes1 + hashes2) def test_get_initial_hash_id_request_timestamp(self): @@ -445,14 +465,20 @@ self.assertEqual(request2.message_id, 456) def test_get_hash_id_request_with_unknown_request_id(self): - self.assertRaises(UnknownHashIDRequest, - self.store1.get_hash_id_request, 123) + self.assertRaises( + UnknownHashIDRequest, + self.store1.get_hash_id_request, + 123, + ) def test_remove_hash_id_request(self): request = self.store1.add_hash_id_request(["hash1"]) request.remove() - self.assertRaises(UnknownHashIDRequest, - self.store1.get_hash_id_request, request.id) + self.assertRaises( + UnknownHashIDRequest, + self.store1.get_hash_id_request, + request.id, + ) def test_add_task(self): data = {"answer": 42} @@ -514,10 +540,16 @@ request1 = self.store1.add_hash_id_request(["hash1"]) request2 = self.store1.add_hash_id_request(["hash2"]) self.store1.clear_hash_id_requests() - self.assertRaises(UnknownHashIDRequest, - self.store1.get_hash_id_request, request1.id) - self.assertRaises(UnknownHashIDRequest, - self.store1.get_hash_id_request, request2.id) + self.assertRaises( + UnknownHashIDRequest, + self.store1.get_hash_id_request, + request1.id, + ) + self.assertRaises( + UnknownHashIDRequest, + self.store1.get_hash_id_request, + request2.id, + ) def test_clear_tasks(self): data = {"answer": 42} diff -Nru landscape-client-23.02/landscape/lib/backoff.py landscape-client-23.08/landscape/lib/backoff.py --- landscape-client-23.02/landscape/lib/backoff.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/backoff.py 2023-08-17 21:19:32.000000000 +0000 @@ -48,6 +48,6 @@ for a delay of 12 and 25% stagger, it works out to 9 + rand(0,3) """ delay = self.get_delay() - non_random_part = delay * (1-stagger_fraction) + non_random_part = delay * (1 - stagger_fraction) random_part = delay * stagger_fraction * random.random() return int(non_random_part + random_part) diff -Nru landscape-client-23.02/landscape/lib/base64.py landscape-client-23.08/landscape/lib/base64.py --- landscape-client-23.02/landscape/lib/base64.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/base64.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from landscape.lib.compat import _PY3 if _PY3: diff -Nru landscape-client-23.02/landscape/lib/bootstrap.py landscape-client-23.08/landscape/lib/bootstrap.py --- landscape-client-23.02/landscape/lib/bootstrap.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/bootstrap.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,11 +1,10 @@ -from string import Template -import pwd import grp import os +import pwd +from string import Template -class BootstrapList(object): - +class BootstrapList: def __init__(self, bootstraps): self._bootstraps = bootstraps @@ -14,8 +13,7 @@ bootstrap.bootstrap(**vars) -class BootstrapPath(object): - +class BootstrapPath: def __init__(self, path, username=None, group=None, mode=None): self.path = path self.username = username @@ -48,13 +46,11 @@ class BootstrapFile(BootstrapPath): - def _create(self, path): open(path, "a").close() class BootstrapDirectory(BootstrapPath): - def _create(self, path): try: os.makedirs(path) diff -Nru landscape-client-23.02/landscape/lib/bpickle.py landscape-client-23.08/landscape/lib/bpickle.py --- landscape-client-23.02/landscape/lib/bpickle.py 2023-02-07 18:55:50.000000000 
+0000 +++ landscape-client-23.08/landscape/lib/bpickle.py 2023-08-17 21:19:32.000000000 +0000 @@ -31,8 +31,8 @@ This file is modified from the original to work with python3, but should be wire compatible and behave the same way (bugs notwithstanding). """ - -from landscape.lib.compat import long, _PY3 +from landscape.lib.compat import _PY3 +from landscape.lib.compat import long dumps_table = {} loads_table = {} @@ -42,7 +42,7 @@ try: return _dt[type(obj)](obj) except KeyError as e: - raise ValueError("Unsupported type: %s" % e) + raise ValueError(f"Unsupported type: {e}") def loads(byte_string, _lt=loads_table, as_is=False): @@ -59,34 +59,30 @@ # we slice the bytestring instead. return _lt[byte_string[0:1]](byte_string, 0, as_is=as_is)[0] except KeyError as e: - raise ValueError("Unknown type character: %s" % e) + raise ValueError(f"Unknown type character: {e}") except IndexError: raise ValueError("Corrupted data") def dumps_bool(obj): - return ("b%d" % int(obj) - ).encode("utf-8") + return (f"b{int(obj):d}").encode("utf-8") def dumps_int(obj): - return ("i%d;" % obj - ).encode("utf-8") + return (f"i{obj:d};").encode("utf-8") def dumps_float(obj): - return ("f%r;" % obj - ).encode("utf-8") + return (f"f{obj!r};").encode("utf-8") def dumps_bytes(obj): - return ("s%d:" % (len(obj),)).encode("utf-8") + obj + return (f"s{len(obj):d}:").encode("utf-8") + obj def dumps_unicode(obj): bobj = obj.encode("utf-8") - return ("u%d:%s" % (len(bobj), obj) - ).encode("utf-8") + return (f"u{len(bobj):d}:{obj}").encode("utf-8") def dumps_list(obj, _dt=dumps_table): @@ -114,28 +110,28 @@ def loads_bool(bytestring, pos, as_is=False): - return bool(int(bytestring[pos+1:pos+2])), pos+2 + return bool(int(bytestring[pos + 1 : pos + 2])), pos + 2 def loads_int(bytestring, pos, as_is=False): endpos = bytestring.index(b";", pos) - return int(bytestring[pos+1:endpos]), endpos+1 + return int(bytestring[pos + 1 : endpos]), endpos + 1 def loads_float(bytestring, pos, as_is=False): endpos = bytestring.index(b";", pos) - return float(bytestring[pos+1:endpos]), endpos+1 + return float(bytestring[pos + 1 : endpos]), endpos + 1 def loads_bytes(bytestring, pos, as_is=False): - startpos = bytestring.index(b":", pos)+1 - endpos = startpos+int(bytestring[pos+1:startpos-1]) + startpos = bytestring.index(b":", pos) + 1 + endpos = startpos + int(bytestring[pos + 1 : startpos - 1]) return bytestring[startpos:endpos], endpos def loads_unicode(bytestring, pos, as_is=False): - startpos = bytestring.index(b":", pos)+1 - endpos = startpos+int(bytestring[pos+1:startpos-1]) + startpos = bytestring.index(b":", pos) + 1 + endpos = startpos + int(bytestring[pos + 1 : startpos - 1]) return bytestring[startpos:endpos].decode("utf-8"), endpos @@ -143,75 +139,83 @@ pos += 1 res = [] append = res.append - while bytestring[pos:pos+1] != b";": - obj, pos = _lt[bytestring[pos:pos+1]](bytestring, pos, as_is=as_is) + while bytestring[pos : pos + 1] != b";": + obj, pos = _lt[bytestring[pos : pos + 1]](bytestring, pos, as_is=as_is) append(obj) - return res, pos+1 + return res, pos + 1 def loads_tuple(bytestring, pos, _lt=loads_table, as_is=False): pos += 1 res = [] append = res.append - while bytestring[pos:pos+1] != b";": - obj, pos = _lt[bytestring[pos:pos+1]](bytestring, pos, as_is=as_is) + while bytestring[pos : pos + 1] != b";": + obj, pos = _lt[bytestring[pos : pos + 1]](bytestring, pos, as_is=as_is) append(obj) - return tuple(res), pos+1 + return tuple(res), pos + 1 def loads_dict(bytestring, pos, _lt=loads_table, as_is=False): pos += 1 res = {} - 
while bytestring[pos:pos+1] != b";": - key, pos = _lt[bytestring[pos:pos+1]](bytestring, pos, as_is=as_is) - val, pos = _lt[bytestring[pos:pos+1]](bytestring, pos, as_is=as_is) + while bytestring[pos : pos + 1] != b";": + key, pos = _lt[bytestring[pos : pos + 1]](bytestring, pos, as_is=as_is) + val, pos = _lt[bytestring[pos : pos + 1]](bytestring, pos, as_is=as_is) if _PY3 and not as_is and isinstance(key, bytes): # Although the wire format of dictionary keys is ASCII bytes, the # code actually expects them to be strings, so we convert them # here. key = key.decode("ascii") res[key] = val - return res, pos+1 + return res, pos + 1 def loads_none(str, pos, as_is=False): - return None, pos+1 + return None, pos + 1 -dumps_table.update({ - bool: dumps_bool, - int: dumps_int, - float: dumps_float, - list: dumps_list, - tuple: dumps_tuple, - dict: dumps_dict, - type(None): dumps_none, - bytes: dumps_bytes, -}) - - -loads_table.update({ - b"b": loads_bool, - b"i": loads_int, - b"f": loads_float, - b"l": loads_list, - b"t": loads_tuple, - b"d": loads_dict, - b"n": loads_none, - b"s": loads_bytes, - b"u": loads_unicode, -}) +dumps_table.update( + { + bool: dumps_bool, + int: dumps_int, + float: dumps_float, + list: dumps_list, + tuple: dumps_tuple, + dict: dumps_dict, + type(None): dumps_none, + bytes: dumps_bytes, + }, +) + + +loads_table.update( + { + b"b": loads_bool, + b"i": loads_int, + b"f": loads_float, + b"l": loads_list, + b"t": loads_tuple, + b"d": loads_dict, + b"n": loads_none, + b"s": loads_bytes, + b"u": loads_unicode, + }, +) if bytes is str: # Python 2.x: We need to map internal unicode strings to UTF-8 # encoded strings, and longs to ints. - dumps_table.update({ - unicode: dumps_unicode, # noqa - long: dumps_int, # noqa - }) + dumps_table.update( + { + unicode: dumps_unicode, # noqa + long: dumps_int, # noqa + }, + ) else: # Python 3.x: We need to map internal strings to UTF-8 encoded strings. - dumps_table.update({ - str: dumps_unicode, - }) + dumps_table.update( + { + str: dumps_unicode, + }, + ) diff -Nru landscape-client-23.02/landscape/lib/cli.py landscape-client-23.08/landscape/lib/cli.py --- landscape-client-23.02/landscape/lib/cli.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/cli.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,11 +1,16 @@ - def add_cli_options(parser, cfgfile=None, datadir=None): """Add common CLI options to the given arg parser.""" from . import config + config.add_cli_options(parser, cfgfile) datadirhelp = "The directory in which to store data files." if datadir: - datadirhelp += " (default: {!r})".format(datadir) - parser.add_option("-d", "--data-path", metavar="PATH", default=datadir, - help=datadirhelp) + datadirhelp += f" (default: {datadir!r})" + parser.add_option( + "-d", + "--data-path", + metavar="PATH", + default=datadir, + help=datadirhelp, + ) diff -Nru landscape-client-23.02/landscape/lib/cloud.py landscape-client-23.08/landscape/lib/cloud.py --- landscape-client-23.02/landscape/lib/cloud.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/cloud.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,7 +1,7 @@ from landscape.lib.fetch import fetch_async EC2_HOST = "169.254.169.254" -EC2_API = "http://%s/latest" % (EC2_HOST,) +EC2_API = f"http://{EC2_HOST}/latest" MAX_LENGTH = 64 @@ -15,9 +15,11 @@ # number of connections to the backend minimal. See lp:567515. 
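For context on the bpickle hunks above: the module implements a compact, bencode-style tagged wire format, and the reformatting there is purely cosmetic. A minimal round-trip sketch, assuming the module behaves as the hunks show:

    from landscape.lib import bpickle

    payload = {"type": "test", "answer": 42}
    wire = bpickle.dumps(payload)
    # Length-prefixed tagged bytes, e.g. b'du4:typeu4:testu6:answeri42;;'
    assert bpickle.loads(wire) == payload
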
deferred = _fetch_ec2_item("instance-id", cloud_data, fetch) deferred.addCallback( - lambda ignore: _fetch_ec2_item("instance-type", cloud_data, fetch)) + lambda ignore: _fetch_ec2_item("instance-type", cloud_data, fetch), + ) deferred.addCallback( - lambda ignore: _fetch_ec2_item("ami-id", cloud_data, fetch)) + lambda ignore: _fetch_ec2_item("ami-id", cloud_data, fetch), + ) def return_result(ignore): """Record the instance data returned by the EC2 API.""" @@ -32,7 +34,9 @@ return { "instance-id": _process_result(instance_id), "ami-id": _process_result(ami_id), - "instance-type": _process_result(instance_type)} + "instance-type": _process_result(instance_type), + } + deferred.addCallback(return_result) return deferred diff -Nru landscape-client-23.02/landscape/lib/compat.py landscape-client-23.08/landscape/lib/compat.py --- landscape-client-23.02/landscape/lib/compat.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/compat.py 2023-08-17 21:19:32.000000000 +0000 @@ -6,13 +6,16 @@ if _PY3: import _pickle as cPickle from configparser import ConfigParser, NoOptionError + SafeConfigParser = ConfigParser import _thread as thread from io import StringIO + stringio = cstringio = StringIO from builtins import input + unicode = str long = int @@ -23,8 +26,10 @@ import thread from StringIO import StringIO + stringio = StringIO from cStringIO import StringIO as cstringio + input = raw_input long = long unicode = unicode diff -Nru landscape-client-23.02/landscape/lib/config.py landscape-client-23.08/landscape/lib/config.py --- landscape-client-23.02/landscape/lib/config.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/config.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,25 +1,30 @@ -from __future__ import absolute_import - -from logging import getLogger -from optparse import OptionParser -import os import os.path import sys +from logging import getLogger +from optparse import OptionParser -from configobj import ConfigObj, ConfigObjError -from twisted.python.compat import StringType as basestring +from configobj import ConfigObj +from configobj import ConfigObjError +from twisted.python.compat import StringType from landscape.lib import cli def add_cli_options(parser, filename=None): """Add common config-related CLI options to the given arg parser.""" - cfgfilehelp = ("Use config from this file (any command line " - "options override settings from the file).") + cfgfilehelp = ( + "Use config from this file (any command line " + "options override settings from the file)." + ) if filename is not None: - cfgfilehelp += " (default: {!r})".format(filename) - parser.add_option("-c", "--config", metavar="FILE", default=filename, - help=cfgfilehelp) + cfgfilehelp += f" (default: {filename!r})" + parser.add_option( + "-c", + "--config", + metavar="FILE", + default=filename, + help=cfgfilehelp, + ) class ConfigSpecOptionParser(OptionParser): @@ -36,7 +41,7 @@ return option -class BaseConfiguration(object): +class BaseConfiguration: """Base class for configuration implementations. @cvar required_options: Optionally, a sequence of key names to require when @@ -84,10 +89,12 @@ Otherwise C{AttributeError} is raised. 
""" - for options in [self._set_options, - self._command_line_options, - self._config_file_options, - self._command_line_defaults]: + for options in [ + self._set_options, + self._command_line_options, + self._config_file_options, + self._command_line_defaults, + ]: if name in options: value = options[name] break @@ -96,7 +103,7 @@ value = None else: raise AttributeError(name) - if isinstance(value, basestring): + if isinstance(value, StringType): option = self._parser.get_option("--" + name.replace("_", "-")) if option is not None: value = option.convert_value(None, value) @@ -127,7 +134,7 @@ not stored in the configuration file. """ if name.startswith("_"): - super(BaseConfiguration, self).__setattr__(name, value) + super().__setattr__(name, value) else: self._set_options[name] = value @@ -158,9 +165,10 @@ allow_missing = accept_nonexistent_default_config # Parse configuration file, if found. for config_filename in config_filenames: - if (os.path.isfile(config_filename) and - os.access(config_filename, os.R_OK) - ): + if os.path.isfile(config_filename) and os.access( + config_filename, + os.R_OK, + ): self.load_configuration_file(config_filename) break @@ -169,8 +177,9 @@ if not allow_missing: if len(config_filenames) == 1: message = ( - "error: config file %s can't be read" % - config_filenames[0]) + f"error: config file {config_filenames[0]} " + "can't be read" + ) else: message = "error: no config file could be read" sys.exit(message) @@ -180,9 +189,13 @@ # Check that all needed options were given. for option in self.required_options: if not getattr(self, option): - sys.exit("error: must specify --%s " - "or the '%s' directive in the config file." - % (option.replace('_', '-'), option)) + sys.exit( + "error: must specify --{} " + "or the '{}' directive in the config file.".format( + option.replace("_", "-"), + option, + ), + ) def _load_external_options(self): """Hook for loading options from elsewhere (e.g. for --import).""" @@ -224,8 +237,13 @@ # from writing "" as an empty value, which get_plugins interprets as # '""' which search for a plugin named "". See bug #1241821. try: - config_obj = ConfigObj(config_source, list_values=False, - raise_errors=False, write_empty_values=True) + config_obj = ConfigObj( + config_source, + list_values=False, + raise_errors=False, + write_empty_values=True, + encoding=getattr(self, "encoding", None), + ) except ConfigObjError as e: logger = getLogger() logger.warn("ERROR at {}: {}".format(config_source, str(e))) @@ -262,10 +280,11 @@ section = config_obj[self.config_section] for name, value in all_options.items(): if name != "config" and name not in self.unsaved_options: - if (value == self._command_line_defaults.get(name) and - name not in self._config_file_options and - name not in self._command_line_options - ): + if ( + value == self._command_line_defaults.get(name) + and name not in self._config_file_options + and name not in self._command_line_options + ): # We don't want to write this value to the config file # as it is default value and as not present in the diff -Nru landscape-client-23.02/landscape/lib/disk.py landscape-client-23.08/landscape/lib/disk.py --- landscape-client-23.02/landscape/lib/disk.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/disk.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,23 +1,45 @@ -from __future__ import division - +import codecs import os import re -import codecs from landscape.lib.compat import _PY3 # List of filesystem types authorized when generating disk use statistics. 
STABLE_FILESYSTEMS = frozenset( - ["ext", "ext2", "ext3", "ext4", "reiserfs", "ntfs", "msdos", "dos", "vfat", - "xfs", "hpfs", "jfs", "ufs", "hfs", "hfsplus", "simfs", "drvfs", "lxfs"]) + [ + "ext", + "ext2", + "ext3", + "ext4", + "reiserfs", + "ntfs", + "msdos", + "dos", + "vfat", + "xfs", + "hpfs", + "jfs", + "ufs", + "hfs", + "hfsplus", + "simfs", + "drvfs", + "lxfs", + "zfs", + "btrfs", + ], +) EXTRACT_DEVICE = re.compile("([a-z]+)[0-9]*") -def get_mount_info(mounts_file, statvfs_, - filesystems_whitelist=STABLE_FILESYSTEMS): +def get_mount_info( + mounts_file, + statvfs_, + filesystems_whitelist=STABLE_FILESYSTEMS, +): """ This is a generator that yields information about mounted filesystems. @@ -40,9 +62,10 @@ mount_point = codecs.decode(mount_point, "string_escape") except ValueError: continue - if (filesystems_whitelist is not None and - filesystem not in filesystems_whitelist - ): + if ( + filesystems_whitelist is not None + and filesystem not in filesystems_whitelist + ): continue megabytes = 1024 * 1024 try: @@ -52,9 +75,13 @@ block_size = stats.f_bsize total_space = (stats.f_blocks * block_size) // megabytes free_space = (stats.f_bfree * block_size) // megabytes - yield {"device": device, "mount-point": mount_point, - "filesystem": filesystem, "total-space": total_space, - "free-space": free_space} + yield { + "device": device, + "mount-point": mount_point, + "filesystem": filesystem, + "total-space": total_space, + "free-space": free_space, + } def get_filesystem_for_path(path, mounts_file, statvfs_): @@ -78,9 +105,9 @@ for info in get_mount_info(mounts_file, statvfs_): mount_segments = info["mount-point"].split("/") if path.startswith(info["mount-point"]): - if ((not candidate) or - path_segments[:len(mount_segments)] == mount_segments - ): + if (not candidate) or path_segments[ + : len(mount_segments) + ] == mount_segments: candidate = info return candidate @@ -106,7 +133,7 @@ try: with open(path, "r") as f: contents = f.readline() - except IOError: + except OSError: return False if contents.strip() == "1": diff -Nru landscape-client-23.02/landscape/lib/fd.py landscape-client-23.08/landscape/lib/fd.py --- landscape-client-23.02/landscape/lib/fd.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/fd.py 2023-08-17 21:19:32.000000000 +0000 @@ -4,7 +4,6 @@ accidentally getting a reactor or something else that might create a critical file descriptor. 
""" - import os import resource diff -Nru landscape-client-23.02/landscape/lib/fetch.py landscape-client-23.08/landscape/lib/fetch.py --- landscape-client-23.02/landscape/lib/fetch.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/fetch.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,12 @@ +import io import os import sys -import io - from optparse import OptionParser from twisted.internet.defer import DeferredList from twisted.internet.threads import deferToThread -from twisted.python.compat import iteritems, networkString +from twisted.python.compat import iteritems +from twisted.python.compat import networkString class FetchError(Exception): @@ -14,39 +14,47 @@ class HTTPCodeError(FetchError): - def __init__(self, http_code, body): self.http_code = http_code self.body = body def __str__(self): - return "Server returned HTTP code %d" % self.http_code + return f"Server returned HTTP code {self.http_code:d}" def __repr__(self): - return "" % self.http_code + return f"" class PyCurlError(FetchError): - def __init__(self, error_code, message): self.error_code = error_code self._message = message def __str__(self): - return "Error %d: %s" % (self.error_code, self.message) + return f"Error {self.error_code:d}: {self.message}" def __repr__(self): - return "" % (self.error_code, - self.message) + return f"" @property def message(self): return self._message -def fetch(url, post=False, data="", headers={}, cainfo=None, curl=None, - connect_timeout=30, total_timeout=600, insecure=False, follow=True, - user_agent=None, proxy=None): +def fetch( + url, + post=False, + data="", + headers={}, + cainfo=None, + curl=None, + connect_timeout=30, + total_timeout=600, + insecure=False, + follow=True, + user_agent=None, + proxy=None, +): """Retrieve a URL and return the content. @param url: The url to be fetched. @@ -65,6 +73,7 @@ @param proxy: The proxy url to use for the request. """ import pycurl + if not isinstance(data, bytes): data = data.encode("utf-8") output = io.BytesIO(data) @@ -88,8 +97,10 @@ curl.setopt(pycurl.CAINFO, networkString(cainfo)) if headers: - curl.setopt(pycurl.HTTPHEADER, - ["%s: %s" % pair for pair in sorted(iteritems(headers))]) + curl.setopt( + pycurl.HTTPHEADER, + [f"{key}: {value}" for (key, value) in sorted(iteritems(headers))], + ) if insecure: curl.setopt(pycurl.SSL_VERIFYPEER, False) @@ -189,8 +200,12 @@ def log_error(failure, url): if logger: - logger("Couldn't fetch file from %s (%s)" % ( - url, str(failure.value))) + logger( + "Couldn't fetch file from {} ({})".format( + url, + str(failure.value), + ), + ) return failure return fetch_many_async(urls, callback=write, errback=log_error, **kwargs) @@ -202,8 +217,14 @@ parser.add_option("--data", default="") parser.add_option("--cainfo") options, (url,) = parser.parse_args(args) - print(fetch(url, post=options.post, data=options.data, - cainfo=options.cainfo)) + print( + fetch( + url, + post=options.post, + data=options.data, + cainfo=options.cainfo, + ), + ) if __name__ == "__main__": diff -Nru landscape-client-23.02/landscape/lib/format.py landscape-client-23.08/landscape/lib/format.py --- landscape-client-23.02/landscape/lib/format.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/format.py 2023-08-17 21:19:32.000000000 +0000 @@ -12,20 +12,20 @@ # from the base class and the method's class name will be from # object's class. 
name = repr(object).split(" ")[2] - return "%s.%s()" % (object.__module__, name) + return f"{object.__module__}.{name}()" elif inspect.isfunction(object): name = repr(object).split(" ")[1] - return "%s.%s()" % (object.__module__, name) - return "%s.%s" % (object.__class__.__module__, object.__class__.__name__) + return f"{object.__module__}.{name}()" + return f"{object.__class__.__module__}.{object.__class__.__name__}" def format_delta(seconds): if not seconds: seconds = 0.0 - return "%.02fs" % float(seconds) + return f"{float(seconds):.02f}s" def format_percent(percent): if not percent: percent = 0.0 - return "%.02f%%" % float(percent) + return f"{float(percent):.02f}%" diff -Nru landscape-client-23.02/landscape/lib/fs.py landscape-client-23.08/landscape/lib/fs.py --- landscape-client-23.02/landscape/lib/fs.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/fs.py 2023-08-17 21:19:32.000000000 +0000 @@ -2,7 +2,6 @@ import os import time - from twisted.python.compat import long diff -Nru landscape-client-23.02/landscape/lib/gpg.py landscape-client-23.08/landscape/lib/gpg.py --- landscape-client-23.02/landscape/lib/gpg.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/gpg.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,7 +1,6 @@ import itertools import shutil import tempfile - from glob import glob from twisted.internet.utils import getProcessOutputAndValue @@ -20,7 +19,7 @@ @param apt_dir: Optionally, path to apt trusted keyring. @return: a C{Deferred} resulting in C{True} if the signature is valid, C{False} otherwise. - """ + """ def remove_gpg_home(ignored): shutil.rmtree(gpg_home) @@ -32,21 +31,33 @@ # bytes here. out, err = out.decode("ascii"), err.decode("ascii") if code != 0: - raise InvalidGPGSignature("%s failed (out='%s', err='%s', " - "code='%d')" % (gpg, out, err, code)) + raise InvalidGPGSignature( + f"{gpg} failed (out='{out}', err='{err}', code='{code:d}')", + ) gpg_home = tempfile.mkdtemp() - keyrings = tuple(itertools.chain(*[ - ("--keyring", keyring) - for keyring in sorted( - glob("{}/trusted.gpg".format(apt_dir)) + - glob("{}/trusted.gpg.d/*.gpg".format(apt_dir)) - ) - ])) + keyrings = tuple( + itertools.chain( + *[ + ("--keyring", keyring) + for keyring in sorted( + glob(f"{apt_dir}/trusted.gpg") + + glob(f"{apt_dir}/trusted.gpg.d/*.gpg"), + ) + ], + ), + ) args = ( - "--no-options", "--homedir", gpg_home, "--no-default-keyring", - "--ignore-time-conflict" - ) + keyrings + ("--verify", signature, filename) + ( + "--no-options", + "--homedir", + gpg_home, + "--no-default-keyring", + "--ignore-time-conflict", + ) + + keyrings + + ("--verify", signature, filename) + ) result = getProcessOutputAndValue(gpg, args=args) result.addBoth(remove_gpg_home) diff -Nru landscape-client-23.02/landscape/lib/jiffies.py landscape-client-23.08/landscape/lib/jiffies.py --- landscape-client-23.02/landscape/lib/jiffies.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/jiffies.py 2023-08-17 21:19:32.000000000 +0000 @@ -29,7 +29,7 @@ uptime2_data = read_uptime2() - stat_file = open("/proc/%d/stat" % pid) + stat_file = open(f"/proc/{pid:d}/stat") stat_data = stat_file.read() stat_file.close() @@ -39,8 +39,8 @@ seconds_uptime2 = float(uptime2_data.split()[0]) jiffie_uptime = int(stat_data.split()[21]) - jiffies1 = int(jiffie_uptime/seconds_uptime1+0.5) - jiffies2 = int(jiffie_uptime/seconds_uptime2+0.5) + jiffies1 = int(jiffie_uptime / seconds_uptime1 + 0.5) + jiffies2 = int(jiffie_uptime / seconds_uptime2 + 
0.5) if jiffies1 == jiffies2: break diff -Nru landscape-client-23.02/landscape/lib/juju.py landscape-client-23.08/landscape/lib/juju.py --- landscape-client-23.02/landscape/lib/juju.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/juju.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,6 @@ -from __future__ import absolute_import - -import os import json import logging +import os from landscape.lib.fs import read_text_file @@ -23,7 +21,8 @@ # return None in any case. except Exception: logging.exception( - "Error attempting to read JSON from %s" % config.juju_filename) + f"Error attempting to read JSON from {config.juju_filename}", + ) return None juju_info["api-addresses"] = juju_info["api-addresses"].split() diff -Nru landscape-client-23.02/landscape/lib/lock.py landscape-client-23.08/landscape/lib/lock.py --- landscape-client-23.02/landscape/lib/lock.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/lock.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,6 +1,6 @@ import fcntl -import time import os +import time class LockError(Exception): @@ -18,7 +18,7 @@ while True: try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) - except IOError: + except OSError: if started < time.time() - timeout: raise LockError("Couldn't obtain lock") else: diff -Nru landscape-client-23.02/landscape/lib/logging.py landscape-client-23.08/landscape/lib/logging.py --- landscape-client-23.02/landscape/lib/logging.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/logging.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,43 +1,72 @@ -from __future__ import absolute_import - import logging -import os import os.path import sys +FORMAT = "%(asctime)s %(levelname)-8s [%(threadName)-10s] %(message)s" + -FORMAT = '%(asctime)s %(levelname)-8s [%(threadName)-10s] %(message)s' +class LoggingAttributeError(Exception): + pass -def add_cli_options(parser, level='info', logdir=None): +def add_cli_options(parser, level="info", logdir=None): """Add common logging-related CLI options to the given arg parser.""" - parser.add_option('-q', '--quiet', default=False, action='store_true', - help='Do not log to the standard output.') + parser.add_option( + "-q", + "--quiet", + default=False, + action="store_true", + help="Do not log to the standard output.", + ) - logdirhelp = 'The directory in which to write log files' + logdirhelp = "The directory in which to write log files" if logdir: - logdirhelp += ' (default: {!r}).'.format(logdir) - parser.add_option('-l', '--log-dir', metavar='FILE', default=logdir, - help=logdirhelp) + logdirhelp += f" (default: {logdir!r})." 
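For orientation, add_cli_options and init_app_logging (below) are meant to hang off a standard optparse parser; a minimal wiring sketch, where the parser construction, log directory, and program name are assumptions for illustration:

    from optparse import OptionParser
    from landscape.lib.logging import add_cli_options, init_app_logging

    parser = OptionParser()
    add_cli_options(parser, level="info", logdir="/var/log/landscape")
    options, _ = parser.parse_args(["--log-level", "debug"])
    # An unknown --log-level now raises LoggingAttributeError (see below).
    init_app_logging(options.log_dir, level=options.log_level,
                     progname="example", quiet=options.quiet)
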
+ parser.add_option( + "-l", + "--log-dir", + metavar="FILE", + default=logdir, + help=logdirhelp, + ) + + parser.add_option( + "--log-level", + default=level, + help="One of debug, info, warning, error or critical.", + ) - parser.add_option('--log-level', default=level, - help='One of debug, info, warning, error or critical.') - -def init_app_logging(logdir, level='info', progname=None, quiet=False): +def init_app_logging(logdir, level="info", progname=None, quiet=False): """Given a log dir, set up logging for an application.""" if progname is None: progname = os.path.basename(sys.argv[0]) - level = logging.getLevelName(level.upper()) - _init_logging( + levelcode = logging.getLevelName(level.upper()) + if isinstance(levelcode, int): + _init_logging( + logging.getLogger(), + levelcode, + logdir, + progname, + logging.Formatter(FORMAT), + sys.stdout if not quiet else None, + ) + return logging.getLogger() + else: + _init_logging( logging.getLogger(), - level, + logging.INFO, logdir, progname, logging.Formatter(FORMAT), sys.stdout if not quiet else None, - ) - return logging.getLogger() + ) + msg = ( + f"Unknown level {level!r}, conversion to " + f"logging code was {levelcode!r}" + ) + logging.error(msg) + raise LoggingAttributeError(msg) def _init_logging(logger, level, logdir, logname, formatter, stdout=None): @@ -47,12 +76,12 @@ # Set up the log file. if not os.path.exists(logdir): os.makedirs(logdir) - filename = os.path.join(logdir, logname + '.log') + filename = os.path.join(logdir, logname + ".log") # Set the handlers. handlers = [ logging.FileHandler(filename), - ] + ] if stdout: handlers.append(logging.StreamHandler(stdout)) for handler in handlers: @@ -72,8 +101,7 @@ handler.acquire() try: handler.stream.close() - handler.stream = open(handler.baseFilename, - handler.mode) + handler.stream = open(handler.baseFilename, handler.mode) finally: handler.release() logging.info("Landscape Logs rotated") diff -Nru landscape-client-23.02/landscape/lib/log.py landscape-client-23.08/landscape/lib/log.py --- landscape-client-23.02/landscape/lib/log.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/log.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import logging diff -Nru landscape-client-23.02/landscape/lib/lsb_release.py landscape-client-23.08/landscape/lib/lsb_release.py --- landscape-client-23.02/landscape/lib/lsb_release.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/lsb_release.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,6 +1,7 @@ """Get information from /usr/bin/lsb_release.""" import os -from subprocess import CalledProcessError, check_output +from subprocess import CalledProcessError +from subprocess import check_output LSB_RELEASE = "/usr/bin/lsb_release" LSB_RELEASE_FILENAME = "/etc/lsb_release" @@ -21,9 +22,9 @@ if lsb_release_filename and os.path.exists(lsb_release_filename): return parse_lsb_release_file(lsb_release_filename) - with open(os.devnull, 'w') as FNULL: + with open(os.devnull, "w") as fnull: try: - lsb_info = check_output([LSB_RELEASE, "-as"], stderr=FNULL) + lsb_info = check_output([LSB_RELEASE, "-as"], stderr=fnull) except (CalledProcessError, FileNotFoundError): # Fall back to reading file, even if it doesn't exist. 
return parse_lsb_release_file(lsb_release_filename) diff -Nru landscape-client-23.02/landscape/lib/message.py landscape-client-23.08/landscape/lib/message.py --- landscape-client-23.02/landscape/lib/message.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/message.py 2023-08-17 21:19:32.000000000 +0000 @@ -51,8 +51,9 @@ # No messages transferred, or # "Old": We'll try to send these old messages that the # other side still wants. - pending_offset = (store.get_pending_offset() + - next_expected - old_sequence) + pending_offset = ( + store.get_pending_offset() + next_expected - old_sequence + ) store.set_pending_offset(pending_offset) store.set_sequence(next_expected) diff -Nru landscape-client-23.02/landscape/lib/monitor.py landscape-client-23.08/landscape/lib/monitor.py --- landscape-client-23.02/landscape/lib/monitor.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/monitor.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,11 @@ -from __future__ import absolute_import - import logging import time -from landscape.lib.format import format_delta, format_percent +from landscape.lib.format import format_delta +from landscape.lib.format import format_percent -class Timer(object): +class Timer: """ A timer keeps track of the number of seconds passed during its lifetime and since the last reset.
""" - def __init__(self, repeat_interval, min_percent, event_name, - create_time=None): - super(CoverageMonitor, self).__init__(event_name, - create_time=create_time) + def __init__( + self, + repeat_interval, + min_percent, + event_name, + create_time=None, + ): + super().__init__( + event_name, + create_time=create_time, + ) self.repeat_interval = repeat_interval self.min_percent = min_percent @@ -129,9 +147,14 @@ log = logging.info if self.warn(): log = logging.warning - log("%d of %d expected %s events (%s) occurred in the last %s.", - self.count, self.expected_count, self.event_name, - format_percent(percent), format_delta(self.since_reset())) + log( + "%d of %d expected %s events (%s) occurred in the last %s.", + self.count, + self.expected_count, + self.event_name, + format_percent(percent), + format_delta(self.since_reset()), + ) self.reset() @@ -154,10 +177,17 @@ monitor every N seconds. """ - def __init__(self, repeat_interval, min_frequency, event_name, - create_time=None): - super(FrequencyMonitor, self).__init__(event_name, - create_time=create_time) + def __init__( + self, + repeat_interval, + min_frequency, + event_name, + create_time=None, + ): + super().__init__( + event_name, + create_time=create_time, + ) self.repeat_interval = repeat_interval self.min_frequency = min_frequency self._last_count = self._create_time() @@ -168,22 +198,25 @@ return since_ping // self.repeat_interval def ping(self): - super(FrequencyMonitor, self).ping() + super().ping() self._last_count = self._create_time() def log(self): if self.warn(): - logging.warning("Only %d of %d minimum expected %s events " - "occurred in the last %s.", self.count, - self.expected_count, self.event_name, - format_delta(self.repeat_interval)) + logging.warning( + "Only %d of %d minimum expected %s events " + "occurred in the last %s.", + self.count, + self.expected_count, + self.event_name, + format_delta(self.repeat_interval), + ) self.reset() def warn(self): if self.repeat_interval and self.min_frequency: - if ((self._create_time() - self._last_count >= - self.repeat_interval) and - self.count < self.min_frequency - ): + if ( + self._create_time() - self._last_count >= self.repeat_interval + ) and self.count < self.min_frequency: return True return False diff -Nru landscape-client-23.02/landscape/lib/network.py landscape-client-23.08/landscape/lib/network.py --- landscape-client-23.02/landscape/lib/network.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/network.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,17 +1,17 @@ -from __future__ import absolute_import - """ Network introspection utilities using ioctl and the /proc filesystem. 
""" import array +import errno import fcntl +import logging import socket import struct -import errno -import logging import netifaces -from landscape.lib.compat import long, _PY3 + +from landscape.lib.compat import _PY3 +from landscape.lib.compat import long __all__ = ["get_active_device_info", "get_network_traffic"] @@ -40,8 +40,8 @@ @param ifaddresses: a dict as returned by L{netifaces.ifaddresses} """ - inet_addr = ifaddresses.get(netifaces.AF_INET, [{}])[0].get('addr') - inet6_addr = ifaddresses.get(netifaces.AF_INET6, [{}])[0].get('addr') + inet_addr = ifaddresses.get(netifaces.AF_INET, [{}])[0].get("addr") + inet6_addr = ifaddresses.get(netifaces.AF_INET6, [{}])[0].get("addr") return bool(inet_addr or inet6_addr) @@ -58,8 +58,11 @@ results[netifaces.AF_INET] = ifaddresses[netifaces.AF_INET] if netifaces.AF_INET6 in ifaddresses: # Ignore link-local IPv6 addresses (fe80::/10). - global_addrs = [addr for addr in ifaddresses[netifaces.AF_INET6] - if not addr['addr'].startswith('fe80:')] + global_addrs = [ + addr + for addr in ifaddresses[netifaces.AF_INET6] + if not addr["addr"].startswith("fe80:") + ] if global_addrs: results[netifaces.AF_INET6] = global_addrs @@ -71,7 +74,7 @@ @param ifaddresses: a dict as returned by L{netifaces.ifaddresses} """ - return ifaddresses[netifaces.AF_INET][0].get('broadcast', '0.0.0.0') + return ifaddresses[netifaces.AF_INET][0].get("broadcast", "0.0.0.0") def get_netmask(ifaddresses): @@ -79,7 +82,7 @@ @param ifaddresses: a dict as returned by L{netifaces.ifaddresses} """ - return ifaddresses[netifaces.AF_INET][0].get('netmask', '') + return ifaddresses[netifaces.AF_INET][0].get("netmask", "") def get_ip_address(ifaddresses): @@ -87,7 +90,7 @@ @param ifaddresses: a dict as returned by L{netifaces.ifaddresses} """ - return ifaddresses[netifaces.AF_INET][0]['addr'] + return ifaddresses[netifaces.AF_INET][0]["addr"] def get_mac_address(ifaddresses): @@ -99,8 +102,8 @@ @param ifaddresses: a dict as returned by L{netifaces.ifaddresses} """ if netifaces.AF_LINK in ifaddresses: - return ifaddresses[netifaces.AF_LINK][0].get('addr', '') - return '' + return ifaddresses[netifaces.AF_LINK][0].get("addr", "") + return "" def get_flags(sock, interface): @@ -111,7 +114,10 @@ @see /usr/include/linux/if.h for the meaning of the flags. 
""" data = fcntl.ioctl( - sock.fileno(), SIOCGIFFLAGS, struct.pack("256s", interface[:15])) + sock.fileno(), + SIOCGIFFLAGS, + struct.pack("256s", interface[:15]), + ) return struct.unpack("H", data[16:18])[0] @@ -119,7 +125,7 @@ """ Returns a list of interfaces with default routes """ - default_table = netifaces.gateways()['default'] + default_table = netifaces.gateways()["default"] interfaces = [gateway[1] for gateway in default_table.values()] return interfaces @@ -135,8 +141,11 @@ results = [] try: - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, - socket.IPPROTO_IP) + sock = socket.socket( + socket.AF_INET, + socket.SOCK_DGRAM, + socket.IPPROTO_IP, + ) for interface in netifaces.interfaces(): if any(f(interface) for f in filters): @@ -161,7 +170,9 @@ ifinfo = {"interface": interface} ifinfo["flags"] = flags ifinfo["speed"], ifinfo["duplex"] = get_network_interface_speed( - sock, ifencoded) + sock, + ifencoded, + ) if extended: ifinfo["ip_addresses"] = ip_addresses @@ -170,7 +181,8 @@ ifinfo["ip_address"] = get_ip_address(ifaddresses) ifinfo["mac_address"] = get_mac_address(ifaddresses) ifinfo["broadcast_address"] = get_broadcast_address( - ifaddresses) + ifaddresses, + ) ifinfo["netmask"] = get_netmask(ifaddresses) results.append(ifinfo) @@ -180,9 +192,13 @@ return results -def get_active_device_info(skipped_interfaces=("lo",), - skip_vlan=True, skip_alias=True, - extended=False, default_only=False): +def get_active_device_info( + skipped_interfaces=("lo",), + skip_vlan=True, + skip_alias=True, + extended=False, + default_only=False, +): def filter_local(interface): return interface in skipped_interfaces @@ -204,13 +220,16 @@ def filter_tap(interface): return interface.startswith("tap") - return get_filtered_if_info(filters=( - filter_tap, - filter_local, - filter_vlan, - filter_alias, - filter_default, - ), extended=extended) + return get_filtered_if_info( + filters=( + filter_tap, + filter_local, + filter_vlan, + filter_alias, + filter_default, + ), + extended=extended, + ) def get_network_traffic(source_file="/proc/net/dev"): @@ -223,8 +242,8 @@ # Parse out the column headers as keys. _, receive_columns, transmit_columns = lines[1].split("|") - columns = ["recv_%s" % column for column in receive_columns.split()] - columns.extend(["send_%s" % column for column in transmit_columns.split()]) + columns = [f"recv_{column}" for column in receive_columns.split()] + columns.extend([f"send_{column}" for column in transmit_columns.split()]) # Parse out the network devices. 
devices = {} @@ -249,9 +268,14 @@ fqdn = socket.getfqdn() if "localhost" in fqdn: # Try the heavy artillery - fqdn = socket.getaddrinfo(socket.gethostname(), None, socket.AF_INET, - socket.SOCK_DGRAM, socket.IPPROTO_IP, - socket.AI_CANONNAME)[0][3] + fqdn = socket.getaddrinfo( + socket.gethostname(), + None, + socket.AF_INET, + socket.SOCK_DGRAM, + socket.IPPROTO_IP, + socket.AI_CANONNAME, + )[0][3] if "localhost" in fqdn: # Another fallback fqdn = socket.gethostname() @@ -283,8 +307,10 @@ speed, duplex = struct.unpack("12xHB28x", res) except (IOError, OSError) as e: if e.errno == errno.EPERM: - logging.warning("Could not determine network interface speed, " - "operation not permitted.") + logging.warning( + "Could not determine network interface speed, " + "operation not permitted.", + ) elif e.errno != errno.EOPNOTSUPP and e.errno != errno.EINVAL: raise e speed = -1 diff -Nru landscape-client-23.02/landscape/lib/persist.py landscape-client-23.08/landscape/lib/persist.py --- landscape-client-23.02/landscape/lib/persist.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/persist.py 2023-08-17 21:19:32.000000000 +0000 @@ -18,17 +18,24 @@ # along with this Python module; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # -import os -import sys import copy +import os import re +import sys from twisted.python.compat import StringType # Py2: basestring, Py3: str -__all__ = ["Persist", "PickleBackend", "BPickleBackend", - "path_string_to_tuple", "path_tuple_to_string", "RootedPersist", - "PersistError", "PersistReadOnlyError"] +__all__ = [ + "Persist", + "PickleBackend", + "BPickleBackend", + "path_string_to_tuple", + "path_tuple_to_string", + "RootedPersist", + "PersistError", + "PersistReadOnlyError", +] NOTHING = object() @@ -42,7 +49,7 @@ pass -class Persist(object): +class Persist: """Persist a hierarchical database of key=>value pairs. @@ -111,17 +118,19 @@ def load_old(): filepathold = filepath + ".old" - if (os.path.isfile(filepathold) and - os.path.getsize(filepathold) > 0 - ): + if ( + os.path.isfile(filepathold) + and os.path.getsize(filepathold) > 0 + ): - # warning("Broken configuration file at %s" % filepath) - # warning("Trying backup at %s" % filepathold) + # warning(f"Broken configuration file at {filepath}") + # warning(f"Trying backup at {filepathold}") try: self._hardmap = self._backend.load(filepathold) except Exception: - raise PersistError("Broken configuration file at %s" % - filepathold) + raise PersistError( + f"Broken configuration file at {filepathold}", + ) return True return False @@ -129,7 +138,7 @@ if not os.path.isfile(filepath): if load_old(): return - raise PersistError("File not found: %s" % filepath) + raise PersistError(f"File not found: {filepath}") if os.path.getsize(filepath) == 0: load_old() return @@ -138,7 +147,7 @@ except Exception: if load_old(): return - raise PersistError("Broken configuration file at %s" % filepath) + raise PersistError(f"Broken configuration file at {filepath}") def save(self, filepath=None): """Save the persist to the given C{filepath}. 
@@ -173,10 +182,11 @@ newobj = self._backend.get(obj, elem) if newobj is NotImplemented: if queue: - path = path[:-len(queue)] - raise PersistError("Can't traverse %r (%r): %r" % - (type(obj), path_tuple_to_string(path), - str(obj))) + path = path[: -len(queue)] + raise PersistError( + f"Can't traverse {type(obj)!r} " + f"({path_tuple_to_string(path)!r}): {str(obj)!r}", + ) if newobj is marker: break if newobj is not marker: @@ -196,8 +206,10 @@ newvalue = setvalue newobj = self._backend.set(obj, elem, newvalue) if newobj is NotImplemented: - raise PersistError("Can't traverse %r with %r" % - (type(obj), type(elem))) + raise PersistError( + f"Can't traverse {type(obj)!r} " + f"with {type(elem)!r}", + ) if not queue: break obj = newobj @@ -231,7 +243,7 @@ return True result = self._backend.has(obj, value) if result is NotImplemented: - raise PersistError("Can't check %r for containment" % type(obj)) + raise PersistError(f"Can't check {type(obj)!r} for containment") return result def keys(self, path, soft=False, hard=False, weak=False): @@ -240,7 +252,7 @@ return [] result = self._backend.keys(obj) if result is NotImplemented: - raise PersistError("Can't return keys for %s" % type(obj)) + raise PersistError(f"Can't return keys for {type(obj)}") return result def get(self, path, default=None, soft=False, hard=False, weak=False): @@ -308,8 +320,9 @@ if obj is not marker: result = self._backend.remove(obj, elem, isvalue) if result is NotImplemented: - raise PersistError("Can't remove %r from %r" % - (elem, type(obj))) + raise PersistError( + "Can't remove {!r} from {!r}".format(elem, type(obj)), + ) if self._backend.empty(obj): if value is not marker: value = marker @@ -344,7 +357,7 @@ return RootedPersist(self, path) -class RootedPersist(object): +class RootedPersist: """Root a L{Persist}'s tree at a particular branch. This class shares the same interface of L{Persist} and provides a shortcut @@ -408,8 +421,12 @@ oldpath = path_string_to_tuple(oldpath) if isinstance(newpath, StringType): newpath = path_string_to_tuple(newpath) - return self.parent.move(self.root + oldpath, self.root + newpath, - soft, weak) + return self.parent.move( + self.root + oldpath, + self.root + newpath, + soft, + weak, + ) def root_at(self, path): if isinstance(path, StringType): @@ -446,7 +463,7 @@ try: result.append(int(token[1:-1])) except ValueError: - raise PersistError("Invalid path index: %r" % token) + raise PersistError(f"Invalid path index: {token!r}") else: result.append(token.replace(r"\.", ".")) return tuple(result) @@ -456,13 +473,13 @@ result = [] for elem in path: if type(elem) is int: - result[-1] += "[%d]" % elem + result[-1] += f"[{elem:d}]" else: result.append(str(elem).replace(".", r"\.")) return ".".join(result) -class Backend(object): +class Backend: """ Base class for L{Persist} backends implementing hierarchical storage functionality. 
@@ -572,7 +589,7 @@ def empty(self, obj): """Whether the given node object has no children.""" - return (not obj) + return not obj def has(self, obj, elem): """Whether the given node object contains the given child element.""" @@ -592,16 +609,16 @@ class PickleBackend(Backend): - def __init__(self): from landscape.lib.compat import cPickle + self._pickle = cPickle def new(self): return {} def load(self, filepath): - with open(filepath, 'rb') as fd: + with open(filepath, "rb") as fd: return self._pickle.load(fd) def save(self, filepath, map): @@ -610,9 +627,9 @@ class BPickleBackend(Backend): - def __init__(self): from landscape.lib import bpickle + self._bpickle = bpickle def new(self): @@ -626,4 +643,5 @@ with open(filepath, "wb") as fd: fd.write(self._bpickle.dumps(map)) + # vim:ts=4:sw=4:et diff -Nru landscape-client-23.02/landscape/lib/plugin.py landscape-client-23.08/landscape/lib/plugin.py --- landscape-client-23.02/landscape/lib/plugin.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/plugin.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import logging from .format import format_object @@ -9,7 +7,7 @@ """There was an error registering or configuring a plugin.""" -class PluginRegistry(object): +class PluginRegistry: """A central integration point for plugins.""" def __init__(self): @@ -40,7 +38,7 @@ return self._plugin_names[name] -class Plugin(object): +class Plugin: """A convenience for writing plugins. This provides a register method which will set up a bunch of diff -Nru landscape-client-23.02/landscape/lib/process.py landscape-client-23.08/landscape/lib/process.py --- landscape-client-23.02/landscape/lib/process.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/process.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,15 +1,14 @@ -from __future__ import absolute_import - import logging import os -from datetime import timedelta, datetime +from datetime import datetime +from datetime import timedelta from landscape.lib import sysstats -from landscape.lib.timestamp import to_timestamp from landscape.lib.jiffies import detect_jiffies +from landscape.lib.timestamp import to_timestamp -class ProcessInformation(object): +class ProcessInformation: """ @param proc_dir: The directory to use for process information. @param jiffies: The value to use for jiffies per second. @@ -18,8 +17,13 @@ @param uptime: The uptime value to use (for unit tests only). 
""" - def __init__(self, proc_dir="/proc", jiffies=None, boot_time=None, - uptime=None): + def __init__( + self, + proc_dir="/proc", + jiffies=None, + boot_time=None, + uptime=None, + ): if boot_time is None: boot_time = sysstats.BootTimes().get_last_boot_time() if boot_time is not None: @@ -68,8 +72,9 @@ for line in file: parts = line.split(":", 1) if parts[0] == "Name": - process_info["name"] = (cmd_line_name.strip() or - parts[1].strip()) + process_info["name"] = ( + cmd_line_name.strip() or parts[1].strip() + ) elif parts[0] == "State": state = parts[1].strip() # In Lucid, capital T is used for both tracing stop @@ -107,27 +112,40 @@ utime = int(parts[13]) stime = int(parts[14]) uptime = self._uptime or sysstats.get_uptime() - pcpu = calculate_pcpu(utime, stime, uptime, - start_time, self._jiffies_per_sec) + pcpu = calculate_pcpu( + utime, + stime, + uptime, + start_time, + self._jiffies_per_sec, + ) process_info["percent-cpu"] = pcpu delta = timedelta(0, start_time // self._jiffies_per_sec) if self._boot_time is None: logging.warning( - "Skipping process (PID %s) without boot time.") + "Skipping process (PID %s) without boot time.", + process_id, + ) return None process_info["start-time"] = to_timestamp( - self._boot_time + delta) + self._boot_time + delta, + ) finally: file.close() - except IOError: + except OSError: # Handle the race that happens when we find a process # which terminates before we open the stat file. return None - assert("pid" in process_info and "state" in process_info and - "name" in process_info and "uid" in process_info and - "gid" in process_info and "start-time" in process_info) + assert ( + "pid" in process_info + and "state" in process_info + and "name" in process_info + and "uid" in process_info + and "gid" in process_info + and "start-time" in process_info + ) return process_info diff -Nru landscape-client-23.02/landscape/lib/reactor.py landscape-client-23.08/landscape/lib/reactor.py --- landscape-client-23.02/landscape/lib/reactor.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/reactor.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,6 @@ """ Extend the regular Twisted reactor with event-handling features. """ -from __future__ import absolute_import - import logging import time @@ -19,7 +17,7 @@ """Raised when hooking on a reactor incorrectly.""" -class EventID(object): +class EventID: """Unique identifier for an event handler. @param event_type: Name of the event type handled by the handler. @@ -32,7 +30,7 @@ self._pair = pair -class EventHandlingReactorMixin(object): +class EventHandlingReactorMixin: """Fire events identified by strings and register handlers for them. 
Note that event handlers are executed synchronously when the C{fire} method @@ -43,7 +41,7 @@ """ def __init__(self): - super(EventHandlingReactorMixin, self).__init__() + super().__init__() self._event_handlers = {} def call_on(self, event_type, handler, priority=0): @@ -85,21 +83,34 @@ handlers = list(self._event_handlers.get(event_type, ())) for handler, priority in handlers: try: - logging.debug("Calling %s for %s with priority %d.", - format_object(handler), event_type, priority) + logging.debug( + "Calling %s for %s with priority %d.", + format_object(handler), + event_type, + priority, + ) results.append(handler(*args, **kwargs)) except KeyboardInterrupt: - logging.exception("Keyboard interrupt while running event " - "handler %s for event type %r with " - "args %r %r.", format_object(handler), - event_type, args, kwargs) + logging.exception( + "Keyboard interrupt while running event " + "handler %s for event type %r with " + "args %r %r.", + format_object(handler), + event_type, + args, + kwargs, + ) self.stop() raise except Exception: - logging.exception("Error running event handler %s for " - "event type %r with args %r %r.", - format_object(handler), event_type, - args, kwargs) + logging.exception( + "Error running event handler %s for " + "event type %r with args %r %r.", + format_object(handler), + event_type, + args, + kwargs, + ) logging.debug("Finished firing %s.", event_type) return results @@ -111,11 +122,10 @@ if type(id) is EventID: self._event_handlers[id._event_type].remove(id._pair) else: - raise InvalidID("EventID instance expected, received %r" % id) - + raise InvalidID(f"EventID instance expected, received {id!r}") -class ReactorID(object): +class ReactorID: def __init__(self, timeout): self._timeout = timeout @@ -131,11 +141,12 @@ def __init__(self): from twisted.internet import reactor from twisted.internet.task import LoopingCall + self._LoopingCall = LoopingCall self._reactor = reactor self._cleanup() self.callFromThread = reactor.callFromThread - super(EventHandlingReactor, self).__init__() + super().__init__() def time(self): """Get current time. @@ -208,6 +219,7 @@ @note: Both C{callback} and C{errback} will be executed in the the parent thread. """ + def on_success(result): if callback: return callback(result) diff -Nru landscape-client-23.02/landscape/lib/schema.py landscape-client-23.08/landscape/lib/schema.py --- landscape-client-23.02/landscape/lib/schema.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/schema.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,14 +1,18 @@ """A schema system. Yes. Another one!""" -from twisted.python.compat import iteritems, unicode, long +from twisted.python.compat import iteritems +from twisted.python.compat import long +from twisted.python.compat import unicode class InvalidError(Exception): """Raised when invalid input is received.""" + pass -class Constant(object): +class Constant: """Something that must be equal to a constant value.""" + def __init__(self, value): self.value = value @@ -20,15 +24,16 @@ pass if value != self.value: - raise InvalidError("%r != %r" % (value, self.value)) + raise InvalidError(f"{value!r} != {self.value!r}") return value -class Any(object): +class Any: """Something which must apply to any of a number of different schemas. @param schemas: Other schema objects. 
""" + def __init__(self, *schemas): self.schemas = schemas @@ -42,40 +47,45 @@ return schema.coerce(value) except InvalidError: pass - raise InvalidError("%r did not match any schema in %s" - % (value, self.schemas)) + raise InvalidError( + f"{value!r} did not match any schema in {self.schemas}", + ) -class Bool(object): +class Bool: """Something that must be a C{bool}.""" + def coerce(self, value): if not isinstance(value, bool): - raise InvalidError("%r is not a bool" % (value,)) + raise InvalidError(f"{value!r} is not a bool") return value -class Int(object): +class Int: """Something that must be an C{int} or C{long}.""" + def coerce(self, value): if not isinstance(value, (int, long)): - raise InvalidError("%r isn't an int or long" % (value,)) + raise InvalidError(f"{value!r} isn't an int or long") return value -class Float(object): +class Float: """Something that must be an C{int}, C{long}, or C{float}.""" + def coerce(self, value): if not isinstance(value, (int, long, float)): - raise InvalidError("%r isn't a float" % (value,)) + raise InvalidError(f"{value!r} isn't a float") return value -class Bytes(object): +class Bytes: """A binary string. If the value is a Python3 str (unicode), it will be automatically encoded. """ + def coerce(self, value): if isinstance(value, bytes): return value @@ -83,10 +93,10 @@ if isinstance(value, str): return value.encode() - raise InvalidError("%r isn't a bytestring" % value) + raise InvalidError(f"{value!r} isn't a bytestring") -class Unicode(object): +class Unicode: """Something that must be a C{unicode}. If the value is a C{str}, it will automatically be decoded. @@ -102,35 +112,38 @@ try: value = value.decode(self.encoding) except UnicodeDecodeError as e: - raise InvalidError("%r can't be decoded: %s" % (value, str(e))) + raise InvalidError( + "{!r} can't be decoded: {}".format(value, str(e)), + ) if not isinstance(value, unicode): - raise InvalidError("%r isn't a unicode" % (value,)) + raise InvalidError(f"{value!r} isn't a unicode") return value -class List(object): +class List: """Something which must be a C{list}. @param schema: The schema that all values of the list must match. """ + def __init__(self, schema): self.schema = schema def coerce(self, value): if not isinstance(value, list): - raise InvalidError("%r is not a list" % (value,)) + raise InvalidError(f"{value!r} is not a list") new_list = list(value) for i, subvalue in enumerate(value): try: new_list[i] = self.schema.coerce(subvalue) except InvalidError as e: raise InvalidError( - "%r could not coerce with %s: %s" - % (subvalue, self.schema, e)) + f"{subvalue!r} could not coerce with {self.schema}: {e}", + ) return new_list -class Tuple(object): +class Tuple: """Something which must be a fixed-length tuple. @param schema: A sequence of schemas, which will be applied to @@ -142,53 +155,67 @@ def coerce(self, value): if not isinstance(value, tuple): - raise InvalidError("%r is not a tuple" % (value,)) + raise InvalidError(f"{value!r} is not a tuple") if len(value) != len(self.schema): - raise InvalidError("Need %s items, got %s in %r" - % (len(self.schema), len(value), value)) + raise InvalidError( + f"Need {len(self.schema)} items, " + f"got {len(value)} in {value!r}", + ) new_value = [] for schema, value in zip(self.schema, value): new_value.append(schema.coerce(value)) return tuple(new_value) -class KeyDict(object): +class KeyDict: """Something which must be a C{dict} with defined keys. The keys must be constant and the values must match a per-key schema. 
+ If strict, extra keys cause an exception during coercion. @param schema: A dict mapping keys to schemas that the values of those keys must match. """ - def __init__(self, schema, optional=None): + + def __init__(self, schema, optional=None, strict=True): if optional is None: optional = [] self.optional = set(optional) self.schema = schema + self._strict = strict def coerce(self, value): new_dict = {} if not isinstance(value, dict): - raise InvalidError("%r is not a dict." % (value,)) + raise InvalidError(f"{value!r} is not a dict.") + for k, v in iteritems(value): - if k not in self.schema: - raise InvalidError("%r is not a valid key as per %r" - % (k, self.schema)) + unknown_key = k not in self.schema + + if unknown_key and self._strict: + raise InvalidError( + f"{k!r} is not a valid key as per {self.schema!r}", + ) + elif unknown_key: + # We are in non-strict mode, so we ignore unknown keys. + continue + try: new_dict[k] = self.schema[k].coerce(v) except InvalidError as e: raise InvalidError( - "Value of %r key of dict %r could not coerce with %s: %s" - % (k, value, self.schema[k], e)) + f"Value of {k!r} key of dict {value!r} could not coerce " + f"with {self.schema[k]}: {e}", + ) new_keys = set(new_dict.keys()) required_keys = set(self.schema.keys()) - self.optional missing = required_keys - new_keys if missing: - raise InvalidError("Missing keys %s" % (missing,)) + raise InvalidError(f"Missing keys {missing}") return new_dict -class Dict(object): +class Dict: """Something which must be a C{dict} with arbitrary keys. @param key_schema: The schema that keys must match. @@ -201,7 +228,7 @@ def coerce(self, value): if not isinstance(value, dict): - raise InvalidError("%r is not a dict." % (value,)) + raise InvalidError(f"{value!r} is not a dict.") new_dict = {} for k, v in value.items(): new_dict[self.key_schema.coerce(k)] = self.value_schema.coerce(v) diff -Nru landscape-client-23.02/landscape/lib/scriptcontent.py landscape-client-23.08/landscape/lib/scriptcontent.py --- landscape-client-23.02/landscape/lib/scriptcontent.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/scriptcontent.py 2023-08-17 21:19:32.000000000 +0000 @@ -5,7 +5,7 @@ """ Concatenates a interpreter and script into an executable script. """ - return u"#!{}\n{}".format(interpreter or u"", code or u"") + return "#!{}\n{}".format(interpreter or "", code or "") def generate_script_hash(script): diff -Nru landscape-client-23.02/landscape/lib/sequenceranges.py landscape-client-23.08/landscape/lib/sequenceranges.py --- landscape-client-23.02/landscape/lib/sequenceranges.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/sequenceranges.py 2023-08-17 21:19:32.000000000 +0000 @@ -5,7 +5,7 @@ """Raised when the sequence isn't proper for translation to ranges.""" -class SequenceRanges(object): +class SequenceRanges: """High level interface to ranges. 
A ranges list represent a sequence of ordered and non-repeating @@ -50,8 +50,8 @@ if index < len(self._ranges): test = self._ranges[index] if isinstance(test, tuple): - return (test[0] <= item <= test[1]) - return (test == item) + return test[0] <= item <= test[1] + return test == item return False def add(self, item): @@ -79,10 +79,11 @@ else: if item is not None and item <= range_stop: if item < range_stop: - raise SequenceError("Sequence is unordered (%r < %r)" % - (item, range_stop)) + raise SequenceError( + f"Sequence is unordered ({item!r} < {range_stop!r})", + ) else: - raise SequenceError("Found duplicated item (%r)" % (item,)) + raise SequenceError(f"Found duplicated item ({item!r})") if range_stop == range_start: yield range_start elif range_stop == range_start + 1: @@ -185,9 +186,9 @@ if item >= range_start: # Handle right side of the range (and replace original item). if range_stop < item + 3: - ranges[index:index + 1] = range(item + 1, range_stop + 1) + ranges[index : index + 1] = range(item + 1, range_stop + 1) else: - ranges[index:index + 1] = ((item + 1, range_stop),) + ranges[index : index + 1] = ((item + 1, range_stop),) # Handle left side of the range. if range_start > item - 3: diff -Nru landscape-client-23.02/landscape/lib/store.py landscape-client-23.08/landscape/lib/store.py --- landscape-client-23.02/landscape/lib/store.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/store.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,4 +1,5 @@ """Functions used by all sqlite-backed stores.""" +from functools import wraps try: import sqlite3 @@ -16,6 +17,7 @@ cursor closing with this decorator. """ + @wraps(method) def inner(self, *args, **kwargs): if not self._db: # Create the database connection only when we start to actually @@ -36,4 +38,5 @@ self._db.rollback() raise return result + return inner diff -Nru landscape-client-23.02/landscape/lib/sysstats.py landscape-client-23.08/landscape/lib/sysstats.py --- landscape-client-23.02/landscape/lib/sysstats.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/sysstats.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,7 @@ -from datetime import datetime -import os import os.path import struct import time +from datetime import datetime from twisted.internet.utils import getProcessOutputAndValue @@ -14,20 +13,26 @@ """Raised when an external command returns a non-zero status.""" -class MemoryStats(object): - +class MemoryStats: def __init__(self, filename="/proc/meminfo"): data = {} for line in open(filename): if ":" in line: key, value = line.split(":", 1) - if key in ["MemTotal", "SwapFree", "SwapTotal", "MemFree", - "Buffers", "Cached"]: + if key in [ + "MemTotal", + "SwapFree", + "SwapTotal", + "MemFree", + "Buffers", + "Cached", + ]: data[key] = int(value.split()[0]) self.total_memory = data["MemTotal"] // 1024 - self.free_memory = (data["MemFree"] + data["Buffers"] + - data["Cached"]) // 1024 + self.free_memory = ( + data["MemFree"] + data["Buffers"] + data["Cached"] + ) // 1024 self.total_swap = data["SwapTotal"] // 1024 self.free_swap = data["SwapFree"] // 1024 @@ -69,19 +74,20 @@ def parse_output(args): stdout_data, stderr_data, status = args if status != 0: - raise CommandError(stderr_data.decode('ascii')) + raise CommandError(stderr_data.decode("ascii")) first_line = stdout_data.split(b"\n", 1)[0] - first_line = first_line.decode('ascii') + first_line = first_line.decode("ascii") return sorted(set(first_line.split())) + return result.addCallback(parse_output) -def 
get_uptime(uptime_file=u"/proc/uptime"): +def get_uptime(uptime_file="/proc/uptime"): """ This parses a file in /proc/uptime format and returns a floating point version of the first value (the actual uptime). """ - with open(uptime_file, 'r') as ufile: + with open(uptime_file, "r") as ufile: data = ufile.readline() up, idle = data.split() return float(up) @@ -98,7 +104,7 @@ yield ThermalZone(thermal_zone_path, zone_name) -class ThermalZone(object): +class ThermalZone: temperature = None temperature_value = None @@ -114,12 +120,14 @@ line = f.readline() try: self.temperature_value = int(line.strip()) / 1000.0 - self.temperature_unit = 'C' - self.temperature = '{:.1f} {}'.format( - self.temperature_value, self.temperature_unit) + self.temperature_unit = "C" + self.temperature = "{:.1f} {}".format( + self.temperature_value, + self.temperature_unit, + ) except ValueError: pass - except EnvironmentError: + except OSError: pass else: temperature_path = os.path.join(self.path, "temperature") @@ -135,7 +143,7 @@ pass -class LoginInfo(object): +class LoginInfo: """Information about a login session gathered from wtmp or utmp.""" # FIXME This format string works fine on my hardware, but *may* be @@ -165,7 +173,7 @@ return bytestring.strip(b"\0").decode("utf-8") -class LoginInfoReader(object): +class LoginInfoReader: """Reader parses C{/var/log/wtmp} and/or C{/var/run/utmp} files. @file: Initialize the reader with an open file. @@ -195,12 +203,16 @@ return None -class BootTimes(object): +class BootTimes: _last_boot = None _last_shutdown = None - def __init__(self, filename="/var/log/wtmp", - boots_newer_than=0, shutdowns_newer_than=0): + def __init__( + self, + filename="/var/log/wtmp", + boots_newer_than=0, + shutdowns_newer_than=0, + ): self._filename = filename self._boots_newer_than = boots_newer_than self._shutdowns_newer_than = shutdowns_newer_than @@ -216,12 +228,16 @@ for info in reader.login_info(): if info.tty_device.startswith("~"): timestamp = to_timestamp(info.entry_time) - if (info.username == "reboot" and - timestamp > self._last_boot): + if ( + info.username == "reboot" + and timestamp > self._last_boot + ): reboot_times.append(timestamp) self._last_boot = timestamp - elif (info.username == "shutdown" and - timestamp > self._last_shutdown): + elif ( + info.username == "shutdown" + and timestamp > self._last_shutdown + ): shutdown_times.append(timestamp) self._last_shutdown = timestamp return reboot_times, shutdown_times diff -Nru landscape-client-23.02/landscape/lib/testing.py landscape-client-23.08/landscape/lib/testing.py --- landscape-client-23.02/landscape/lib/testing.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/testing.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,5 @@ -from __future__ import absolute_import - import bisect import logging -import os import os.path import re import shutil @@ -10,18 +7,20 @@ import sys import tempfile import unittest +from logging import ERROR +from logging import Formatter +from logging import Handler - -from logging import Handler, ERROR, Formatter -from twisted.trial.unittest import TestCase -from twisted.python.compat import StringType as basestring -from landscape.lib.compat import _PY3 -from twisted.python.failure import Failure from twisted.internet.defer import Deferred from twisted.internet.error import ConnectError +from twisted.python.compat import StringType +from twisted.python.failure import Failure +from twisted.trial.unittest import TestCase +from landscape.lib.compat import _PY3 from 
landscape.lib.compat import ConfigParser -from landscape.lib.compat import stringio, cstringio +from landscape.lib.compat import cstringio +from landscape.lib.compat import stringio from landscape.lib.config import BaseConfiguration from landscape.lib.reactor import EventHandlingReactorMixin from landscape.lib.sysstats import LoginInfo @@ -30,7 +29,7 @@ class CompatTestCase(unittest.TestCase): if not _PY3: - assertCountEqual = TestCase.assertItemsEqual + assertCountEqual = TestCase.assertItemsEqual # noqa: N815 class HelperTestCase(unittest.TestCase): @@ -38,7 +37,7 @@ helpers = [] def setUp(self): - super(HelperTestCase, self).setUp() + super().setUp() self._helper_instances = [] if LogKeeperHelper not in self.helpers: @@ -58,18 +57,26 @@ if hasattr(helper, "tear_down"): helper.tear_down(self) - super(HelperTestCase, self).tearDown() - + super().tearDown() -class FSTestCase(object): - def assertFileContent(self, filename, expected_content): +class FSTestCase: + def assertFileContent(self, filename, expected_content): # noqa: N802 with open(filename, "rb") as fd: actual_content = fd.read() self.assertEqual(expected_content, actual_content) - def makeFile(self, content=None, suffix="", prefix="tmp", basename=None, - dirname=None, path=None, mode="w", backupsuffix=None): + def makeFile( # noqa: N802 + self, + content=None, + suffix="", + prefix="tmp", + basename=None, + dirname=None, + path=None, + mode="w", + backupsuffix=None, + ): """Create a temporary file and return the path to it. @param content: Initial content for the file. @@ -101,6 +108,7 @@ os.remove(path + backupsuffix) except OSError: pass + self.addCleanup(remove_backup) return path @@ -118,7 +126,13 @@ except OSError: pass - def makeDir(self, suffix="", prefix="tmp", dirname=None, path=None): + def makeDir( # noqa: N802 + self, + suffix="", + prefix="tmp", + dirname=None, + path=None, + ): """Create a temporary directory and return the path to it. @param suffix: Suffix to be given to the file's basename. @@ -139,18 +153,14 @@ if bindir is None: bindir = self.makeDir() config.bindir = bindir - filename = self.makeFile( - content, - dirname=bindir, - basename=name) + filename = self.makeFile(content, dirname=bindir, basename=name) os.chmod(filename, 0o755) return filename class ConfigTestCase(FSTestCase): - def setUp(self): - super(ConfigTestCase, self).setUp() + super().setUp() self._old_config_filenames = BaseConfiguration.default_config_filenames BaseConfiguration.default_config_filenames = [self.makeFile("")] @@ -158,9 +168,9 @@ def tearDown(self): BaseConfiguration.default_config_filenames = self._old_config_filenames - super(ConfigTestCase, self).tearDown() + super().tearDown() - def assertConfigEqual(self, first, second): + def assertConfigEqual(self, first, second): # noqa: N802 """ Compare two configuration files for equality. 
The order of parameters and comments may be different but the actual parameters and sections @@ -174,16 +184,19 @@ second_parser = ConfigParser() second_parser.readfp(second_fp) - self.assertEqual(set(first_parser.sections()), - set(second_parser.sections())) + self.assertEqual( + set(first_parser.sections()), + set(second_parser.sections()), + ) for section in first_parser.sections(): - self.assertEqual(dict(first_parser.items(section)), - dict(second_parser.items(section))) + self.assertEqual( + dict(first_parser.items(section)), + dict(second_parser.items(section)), + ) class TwistedTestCase(TestCase): - - def successResultOf(self, deferred): + def successResultOf(self, deferred): # noqa: N802 """See C{twisted.trial._synctest._Assertions.successResultOf}. This is a copy of the original method, which is available only @@ -193,16 +206,18 @@ deferred.addBoth(result.append) if not result: self.fail( - "Success result expected on %r, found no result instead" % ( - deferred,)) + f"Success result expected on {deferred!r}, " + "found no result instead", + ) elif isinstance(result[0], Failure): self.fail( - "Success result expected on %r, " - "found failure result (%r) instead" % (deferred, result[0])) + f"Success result expected on {deferred!r}, " + f"found failure result ({result[0]!r}) instead", + ) else: return result[0] - def failureResultOf(self, deferred): + def failureResultOf(self, deferred): # noqa: N802 """See C{twisted.trial._synctest._Assertions.failureResultOf}. This is a copy of the original method, which is available only @@ -212,16 +227,18 @@ deferred.addBoth(result.append) if not result: self.fail( - "Failure result expected on %r, found no result instead" % ( - deferred,)) + f"Failure result expected on {deferred!r}, " + "found no result instead", + ) elif not isinstance(result[0], Failure): self.fail( - "Failure result expected on %r, " - "found success result (%r) instead" % (deferred, result[0])) + f"Failure result expected on {deferred!r}, " + f"found success result ({result[0]!r}) instead", + ) else: return result[0] - def assertNoResult(self, deferred): + def assertNoResult(self, deferred): # noqa: N802 """See C{twisted.trial._synctest._Assertions.assertNoResult}. This is a copy of the original method, which is available only @@ -231,19 +248,21 @@ deferred.addBoth(result.append) if result: self.fail( - "No result expected on %r, found %r instead" % ( - deferred, result[0])) + f"No result expected on {deferred!r}, " + f"found {result[0]!r} instead", + ) - def assertDeferredSucceeded(self, deferred): + def assertDeferredSucceeded(self, deferred): # noqa: N802 self.assertTrue(isinstance(deferred, Deferred)) called = [] def callback(result): called.append(True) + deferred.addCallback(callback) self.assertTrue(called) - def assertSuccess(self, deferred, result=None): + def assertSuccess(self, deferred, result=None): # noqa: N802 """ Assert that the given C{deferred} results in the given C{result}. """ @@ -252,7 +271,6 @@ class ErrorHandler(Handler): - def __init__(self, *args, **kwargs): Handler.__init__(self, *args, **kwargs) self.errors = [] @@ -263,7 +281,6 @@ class LoggedErrorsError(Exception): - def __str__(self): out = "The following errors were logged\n" formatter = Formatter() @@ -272,7 +289,7 @@ return out -class LogKeeperHelper(object): +class LogKeeperHelper: """Record logging information. 
Puts a 'logfile' attribute on your test case, which is a StringIO @@ -287,7 +304,7 @@ test_case.logger = logger = logging.getLogger() test_case.logfile = cstringio() handler = logging.StreamHandler(test_case.logfile) - format = ("%(levelname)8s: %(message)s") + format = "%(levelname)8s: %(message)s" handler.setFormatter(logging.Formatter(format)) self.old_handlers = logger.handlers self.old_level = logger.level @@ -301,9 +318,11 @@ errors = [] for record in self.error_handler.errors: for ignored_type in self.ignored_exception_types: - if (record.exc_info and record.exc_info[0] and - issubclass(record.exc_info[0], ignored_type) - ): + if ( + record.exc_info + and record.exc_info[0] + and issubclass(record.exc_info[0], ignored_type) + ): break else: for ignored_regex in self.ignored_exception_regexes: @@ -315,14 +334,13 @@ raise LoggedErrorsError(errors) def ignore_errors(self, type_or_regex): - if isinstance(type_or_regex, basestring): + if isinstance(type_or_regex, StringType): self.ignored_exception_regexes.append(re.compile(type_or_regex)) else: self.ignored_exception_types.append(type_or_regex) -class EnvironSnapshot(object): - +class EnvironSnapshot: def __init__(self): self._snapshot = os.environ.copy() @@ -333,8 +351,7 @@ del os.environ[key] -class EnvironSaverHelper(object): - +class EnvironSaverHelper: def set_up(self, test_case): self._snapshot = EnvironSnapshot() @@ -342,8 +359,7 @@ self._snapshot.restore() -class MockPopen(object): - +class MockPopen: def __init__(self, output, return_codes=None, err_out=""): self.output = output self.err_out = err_out @@ -373,8 +389,7 @@ return self.return_codes.pop(0) -class StandardIOHelper(object): - +class StandardIOHelper: def set_up(self, test_case): test_case.old_stdout = sys.stdout test_case.old_stdin = sys.stdin @@ -388,23 +403,45 @@ sys.stdin = test_case.old_stdin -def append_login_data(filename, login_type=0, pid=0, tty_device="/dev/", - id="", username="", hostname="", termination_status=0, - exit_status=0, session_id=0, entry_time_seconds=0, - entry_time_milliseconds=0, - remote_ip_address=[0, 0, 0, 0]): +def append_login_data( + filename, + login_type=0, + pid=0, + tty_device="/dev/", + id="", + username="", + hostname="", + termination_status=0, + exit_status=0, + session_id=0, + entry_time_seconds=0, + entry_time_milliseconds=0, + remote_ip_address=[0, 0, 0, 0], +): """Append binary login data to the specified filename.""" file = open(filename, "ab") try: - file.write(struct.pack(LoginInfo.RAW_FORMAT, login_type, pid, - tty_device.encode("utf-8"), id.encode("utf-8"), - username.encode("utf-8"), - hostname.encode("utf-8"), - termination_status, exit_status, session_id, - entry_time_seconds, entry_time_milliseconds, - remote_ip_address[0], remote_ip_address[1], - remote_ip_address[2], remote_ip_address[3], - b"")) + file.write( + struct.pack( + LoginInfo.RAW_FORMAT, + login_type, + pid, + tty_device.encode("utf-8"), + id.encode("utf-8"), + username.encode("utf-8"), + hostname.encode("utf-8"), + termination_status, + exit_status, + session_id, + entry_time_seconds, + entry_time_milliseconds, + remote_ip_address[0], + remote_ip_address[1], + remote_ip_address[2], + remote_ip_address[3], + b"", + ), + ) finally: file.close() @@ -421,7 +458,7 @@ return mock_counter(100) -class StubProcessFactory(object): +class StubProcessFactory: """ A L{IReactorProcess} provider which records L{spawnProcess} calls and allows tests to get at the protocol. 
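Looking back at the schema.py hunks above, the new strict flag on KeyDict is the main behavioural (rather than cosmetic) change in that file: strict mode, the default, still rejects unknown keys, while non-strict mode now silently drops them. A sketch with illustrative schema values:

    from landscape.lib.schema import Int, InvalidError, KeyDict

    strict = KeyDict({"pid": Int()})
    lenient = KeyDict({"pid": Int()}, strict=False)

    data = {"pid": 42, "uptime": 100}  # "uptime" is not in the schema
    try:
        strict.coerce(data)
    except InvalidError:
        pass  # the default, strict mode rejects the unknown key
    assert lenient.coerce(data) == {"pid": 42}  # non-strict mode drops it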
@@ -430,26 +467,47 @@ def __init__(self): self.spawns = [] - def spawnProcess(self, protocol, executable, args=(), env={}, path=None, - uid=None, gid=None, usePTY=0, childFDs=None): - self.spawns.append((protocol, executable, args, - env, path, uid, gid, usePTY, childFDs)) + def spawnProcess( # noqa: N802 + self, + protocol, + executable, + args=(), + env={}, + path=None, + uid=None, + gid=None, + usePTY=0, + childFDs=None, + ): + self.spawns.append( + ( + protocol, + executable, + args, + env, + path, + uid, + gid, + usePTY, + childFDs, + ), + ) -class DummyProcess(object): +class DummyProcess: """A process (transport) that doesn't do anything.""" def __init__(self): self.signals = [] - def signalProcess(self, signal): + def signalProcess(self, signal): # noqa: N802 self.signals.append(signal) - def closeChildFD(self, fd): + def closeChildFD(self, fd): # noqa: N802 pass -class ProcessDataBuilder(object): +class ProcessDataBuilder: """Builder creates sample data for the process info plugin to consume. @param sample_dir: The directory for sample data. @@ -466,9 +524,18 @@ def __init__(self, sample_dir): self._sample_dir = sample_dir - def create_data(self, process_id, state, uid, gid, - started_after_boot=0, process_name=None, - generate_cmd_line=True, stat_data=None, vmsize=11676): + def create_data( + self, + process_id, + state, + uid, + gid, + started_after_boot=0, + process_name=None, + generate_cmd_line=True, + stat_data=None, + vmsize=11676, + ): """Creates sample data for a process. @@ -481,19 +548,19 @@ kernel process) @param stat_data: Array of items to write to the /proc//stat file. """ - sample_data = """ -Name: %(process_name)s -State: %(state)s + sample_data = f""" +Name: {process_name[:15]} +State: {state} Tgid: 24759 Pid: 24759 PPid: 17238 TracerPid: 0 -Uid: %(uid)d 0 0 0 -Gid: %(gid)d 0 0 0 +Uid: {uid:d} 0 0 0 +Gid: {gid:d} 0 0 0 FDSize: 256 Groups: 4 20 24 25 29 30 44 46 106 110 112 1000 VmPeak: 11680 kB -VmSize: %(vmsize)d kB +VmSize: {vmsize:d} kB VmLck: 0 kB VmHWM: 6928 kB VmRSS: 6924 kB @@ -512,8 +579,7 @@ CapInh: 0000000000000000 CapPrm: 0000000000000000 CapEff: 0000000000000000 -""" % ({"process_name": process_name[:15], "state": state, "uid": uid, - "gid": gid, "vmsize": vmsize}) # noqa +""" process_dir = os.path.join(self._sample_dir, str(process_id)) os.mkdir(process_dir) filename = os.path.join(process_dir, "status") @@ -524,9 +590,9 @@ finally: file.close() if stat_data is None: - stat_data = """\ -0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 %d\ -""" % (started_after_boot,) + stat_data = f"""\ +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 {started_after_boot:d}\ +""" filename = os.path.join(process_dir, "stat") file = open(filename, "w+") @@ -537,8 +603,10 @@ if generate_cmd_line: sample_data = """\ -/usr/sbin/%(process_name)s\0--pid-file\0/var/run/%(process_name)s.pid\0 -""" % {"process_name": process_name} +/usr/sbin/{process_name}\0--pid-file\0/var/run/{process_name}.pid\0 +""".format( + process_name=process_name, + ) else: sample_data = "" filename = os.path.join(process_dir, "cmdline") @@ -555,8 +623,7 @@ shutil.rmtree(process_dir) -class FakeReactorID(object): - +class FakeReactorID: def __init__(self, data): self.active = True self._data = data @@ -575,13 +642,14 @@ around Unix sockets), and implement a fake version C{listen_unix}, but this hasn't been done yet. """ + # XXX probably this shouldn't be a class attribute, but we need client-side # FakeReactor instaces to be aware of listening sockets created by # server-side FakeReactor instances. 
_socket_paths = {} def __init__(self): - super(FakeReactor, self).__init__() + super().__init__() self._current_time = 0 self._calls = [] self.hosts = {} @@ -590,6 +658,7 @@ # XXX we need a reference to the Twisted reactor as well because # some tests use it from twisted.internet import reactor + self._reactor = reactor def time(self): @@ -610,7 +679,6 @@ self._calls.insert(index, call) def call_every(self, seconds, f, *args, **kwargs): - def fake(): # update the call so that cancellation will continue # working with the same ID. And do it *before* the call @@ -622,6 +690,7 @@ if call.active: self.cancel_call(call) raise + call = self.call_later(seconds, fake) return call @@ -631,7 +700,7 @@ self._calls.remove(id._data) id.active = False else: - super(FakeReactor, self).cancel_call(id) + super().cancel_call(id) def call_when_running(self, f): # Just schedule a call that will be kicked by the run() method. @@ -657,10 +726,8 @@ self._run_threaded_callbacks() def listen_unix(self, socket_path, factory): - - class FakePort(object): - - def stopListening(oself): + class FakePort: + def stopListening(oself): # noqa: N802,N805 self._socket_paths.pop(socket_path) self._socket_paths[socket_path] = factory @@ -669,6 +736,7 @@ def connect_unix(self, path, factory): server = self._socket_paths.get(path) from landscape.lib.tests.test_amp import FakeConnector + if server: connector = FakeConnector(factory, server) connector.connect() @@ -697,8 +765,9 @@ advancing time and triggering the relevant scheduled calls (see also C{call_later} and C{call_every}). """ - while (self._calls and - self._calls[0][0] <= self._current_time + seconds): + while ( + self._calls and self._calls[0][0] <= self._current_time + seconds + ): call = self._calls.pop(0) # If we find a call within the time we're advancing, # before calling it, let's advance the time *just* to diff -Nru landscape-client-23.02/landscape/lib/tests/test_amp.py landscape-client-23.08/landscape/lib/tests/test_amp.py --- landscape-client-23.02/landscape/lib/tests/test_amp.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_amp.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,19 +1,24 @@ import unittest from twisted.internet import reactor -from twisted.internet.error import ConnectError, ConnectionDone +from twisted.internet.defer import Deferred +from twisted.internet.defer import inlineCallbacks +from twisted.internet.error import ConnectError +from twisted.internet.error import ConnectionDone from twisted.internet.task import Clock -from twisted.internet.defer import Deferred, inlineCallbacks from twisted.python.failure import Failure from landscape.lib import testing -from landscape.lib.amp import ( - MethodCallError, MethodCallServerProtocol, MethodCallClientProtocol, - MethodCallServerFactory, MethodCallClientFactory, RemoteObject, - MethodCallSender) +from landscape.lib.amp import MethodCallClientFactory +from landscape.lib.amp import MethodCallClientProtocol +from landscape.lib.amp import MethodCallError +from landscape.lib.amp import MethodCallSender +from landscape.lib.amp import MethodCallServerFactory +from landscape.lib.amp import MethodCallServerProtocol +from landscape.lib.amp import RemoteObject -class FakeTransport(object): +class FakeTransport: """Accumulate written data into a list.""" def __init__(self, connection): @@ -23,17 +28,17 @@ def write(self, data): self.stream.append(data) - def loseConnection(self): + def loseConnection(self): # noqa: N802 raise NotImplementedError() - def getPeer(self): + 
def getPeer(self): # noqa: N802 pass - def getHost(self): + def getHost(self): # noqa: N802 pass -class FakeConnection(object): +class FakeConnection: """Simulate a connection between a client and a server protocol.""" def __init__(self, client, server): @@ -62,7 +67,7 @@ break -class FakeConnector(object): +class FakeConnector: """Make L{FakeConnection}s using the given server and client factories.""" def __init__(self, client, server): @@ -75,8 +80,10 @@ return self.client def connect(self): - self.connection = FakeConnection(self.client.buildProtocol(None), - self.server.buildProtocol(None)) + self.connection = FakeConnection( + self.client.buildProtocol(None), + self.server.buildProtocol(None), + ) # XXX Let the client factory be aware of this fake connection, so # it can flush it when needed. This is to workaround AMP not @@ -89,7 +96,7 @@ self.connection.lose(self, Failure(ConnectionDone())) -class DummyObject(object): +class DummyObject: method = None @@ -99,9 +106,8 @@ class MethodCallTest(BaseTestCase): - def setUp(self): - super(MethodCallTest, self).setUp() + super().setUp() self.methods = ["method"] self.object = DummyObject() server = MethodCallServerProtocol(self.object, self.methods) @@ -117,9 +123,11 @@ can't be called. """ self.methods.remove("method") - deferred = self.sender.send_method_call(method="method", - args=[], - kwargs={}) + deferred = self.sender.send_method_call( + method="method", + args=[], + kwargs={}, + ) self.connection.flush() self.failureResultOf(deferred).trap(MethodCallError) @@ -129,9 +137,11 @@ with an empty response. """ self.object.method = lambda: None - deferred = self.sender.send_method_call(method="method", - args=[], - kwargs={}) + deferred = self.sender.send_method_call( + method="method", + args=[], + kwargs={}, + ) self.connection.flush() self.assertIs(None, self.successResultOf(deferred)) @@ -141,9 +151,11 @@ object method with a return value. """ self.object.method = lambda: "Cool result" - deferred = self.sender.send_method_call(method="method", - args=[], - kwargs={}) + deferred = self.sender.send_method_call( + method="method", + args=[], + kwargs={}, + ) self.connection.flush() self.assertEqual("Cool result", self.successResultOf(deferred)) @@ -153,9 +165,11 @@ a response value. """ self.object.method = lambda word: word.capitalize() - deferred = self.sender.send_method_call(method="method", - args=["john"], - kwargs={}) + deferred = self.sender.send_method_call( + method="method", + args=["john"], + kwargs={}, + ) self.connection.flush() self.assertEqual("John", self.successResultOf(deferred)) @@ -164,9 +178,11 @@ The return value of a L{MethodCall} argument can be a boolean. """ self.object.method = lambda word: len(word) < 3 - deferred = self.sender.send_method_call(method="method", - args=["hi"], - kwargs={}) + deferred = self.sender.send_method_call( + method="method", + args=["hi"], + kwargs={}, + ) self.connection.flush() self.assertTrue(self.successResultOf(deferred)) @@ -175,9 +191,11 @@ A connected client can issue a L{MethodCall} with many arguments. """ self.object.method = lambda word1, word2: word1 + word2 - deferred = self.sender.send_method_call(method="method", - args=["We ", "rock"], - kwargs={}) + deferred = self.sender.send_method_call( + method="method", + args=["We ", "rock"], + kwargs={}, + ) self.connection.flush() self.assertEqual("We rock", self.successResultOf(deferred)) @@ -187,9 +205,11 @@ default arguments. 
""" self.object.method = lambda word, index=0: word[index:].lower() - deferred = self.sender.send_method_call(method="method", - args=["OHH"], - kwargs={}) + deferred = self.sender.send_method_call( + method="method", + args=["OHH"], + kwargs={}, + ) self.connection.flush() self.assertEqual("ohh", self.successResultOf(deferred)) @@ -200,9 +220,11 @@ the caller it will be used in place of the default value """ self.object.method = lambda word, index=0: word[index:].lower() - deferred = self.sender.send_method_call(method="method", - args=["ABC"], - kwargs={"index": 2}) + deferred = self.sender.send_method_call( + method="method", + args=["ABC"], + kwargs={"index": 2}, + ) self.connection.flush() self.assertEqual("c", self.successResultOf(deferred)) @@ -212,10 +234,13 @@ """ # Sort the keys to ensure stable test outcome. self.object.method = lambda d: "".join( - sorted(d.keys()) * sum(d.values())) - deferred = self.sender.send_method_call(method="method", - args=[{"foo": 1, "bar": 2}], - kwargs={}) + sorted(d.keys()) * sum(d.values()), + ) + deferred = self.sender.send_method_call( + method="method", + args=[{"foo": 1, "bar": 2}], + kwargs={}, + ) self.connection.flush() self.assertEqual("barfoobarfoobarfoo", self.successResultOf(deferred)) @@ -224,12 +249,14 @@ Method arguments passed to a MethodCall can be a dict of bytes. """ arg = {b"byte_key": 1} - self.object.method = lambda d: ",".join([ - type(x).__name__ for x in d.keys()]) + self.object.method = lambda d: ",".join( + [type(x).__name__ for x in d.keys()], + ) deferred = self.sender.send_method_call( method="method", args=[arg], - kwargs={}) + kwargs={}, + ) self.connection.flush() # str under python2, bytes under python3 self.assertEqual(type(b"").__name__, self.successResultOf(deferred)) @@ -239,13 +266,16 @@ If the target object method returns an object that can't be serialized, the L{MethodCall} raises an error. """ - class Complex(object): + + class Complex: pass self.object.method = lambda: Complex() - deferred = self.sender.send_method_call(method="method", - args=[], - kwargs={}) + deferred = self.sender.send_method_call( + method="method", + args=[], + kwargs={}, + ) self.connection.flush() self.failureResultOf(deferred).trap(MethodCallError) @@ -255,9 +285,11 @@ bigger than the maximum AMP parameter value size. """ self.object.method = lambda word: len(word) == 65535 - deferred = self.sender.send_method_call(method="method", - args=["!" * 65535], - kwargs={}) + deferred = self.sender.send_method_call( + method="method", + args=["!" * 65535], + kwargs={}, + ) self.connection.flush() self.assertTrue(self.successResultOf(deferred)) @@ -267,12 +299,16 @@ method calls with arguments bigger than the maximum AMP value size. """ self.object.method = lambda word: len(word) - deferred1 = self.sender.send_method_call(method="method", - args=["!" * 80000], - kwargs={}) - deferred2 = self.sender.send_method_call(method="method", - args=["*" * 90000], - kwargs={}) + deferred1 = self.sender.send_method_call( + method="method", + args=["!" * 80000], + kwargs={}, + ) + deferred2 = self.sender.send_method_call( + method="method", + args=["*" * 90000], + kwargs={}, + ) self.connection.flush() self.assertEqual(80000, self.successResultOf(deferred1)) @@ -284,9 +320,11 @@ with a L{MethodCallError}. 
""" self.object.method = lambda a, b: a / b - deferred = self.sender.send_method_call(method="method", - args=[1, 0], - kwargs={}) + deferred = self.sender.send_method_call( + method="method", + args=[1, 0], + kwargs={}, + ) self.connection.flush() self.failureResultOf(deferred).trap(MethodCallError) @@ -298,9 +336,11 @@ self.object.deferred = Deferred() self.object.method = lambda: self.object.deferred result = [] - deferred = self.sender.send_method_call(method="method", - args=[], - kwargs={}) + deferred = self.sender.send_method_call( + method="method", + args=[], + kwargs={}, + ) deferred.addCallback(result.append) self.connection.flush() @@ -323,9 +363,11 @@ self.object.deferred = Deferred() self.object.method = lambda: self.object.deferred result = [] - deferred = self.sender.send_method_call(method="method", - args=[], - kwargs={}) + deferred = self.sender.send_method_call( + method="method", + args=[], + kwargs={}, + ) deferred.addErrback(result.append) self.connection.flush() @@ -348,9 +390,11 @@ """ self.object.method = lambda: Deferred() result = [] - deferred = self.sender.send_method_call(method="method", - args=[], - kwargs={}) + deferred = self.sender.send_method_call( + method="method", + args=[], + kwargs={}, + ) deferred.addErrback(result.append) self.clock.advance(60) @@ -366,9 +410,11 @@ self.object.deferred = Deferred() self.object.method = lambda: self.object.deferred result = [] - deferred = self.sender.send_method_call(method="method", - args=[], - kwargs={}) + deferred = self.sender.send_method_call( + method="method", + args=[], + kwargs={}, + ) deferred.addErrback(result.append) self.clock.advance(60) @@ -379,9 +425,8 @@ class RemoteObjectTest(BaseTestCase): - def setUp(self): - super(RemoteObjectTest, self).setUp() + super().setUp() self.methods = ["method"] self.object = DummyObject() self.clock = Clock() @@ -486,9 +531,8 @@ class MethodCallClientFactoryTest(BaseTestCase): - def setUp(self): - super(MethodCallClientFactoryTest, self).setUp() + super().setUp() self.clock = Clock() self.factory = MethodCallClientFactory(self.clock) @@ -548,7 +592,8 @@ The L{MethodCallClientFactory} keeps trying to connect if maxRetries is not reached. 
""" - class FakeConnector(object): + + class FakeConnector: called = False def connect(self): @@ -580,9 +625,8 @@ class MethodCallFunctionalTest(BaseTestCase): - def setUp(self): - super(MethodCallFunctionalTest, self).setUp() + super().setUp() self.methods = ["method"] self.object = DummyObject() self.object.method = lambda word: word.capitalize() @@ -592,7 +636,7 @@ self.port = reactor.listenUNIX(self.socket, self.server) def tearDown(self): - super(MethodCallFunctionalTest, self).tearDown() + super().tearDown() self.port.stopListening() @inlineCallbacks diff -Nru landscape-client-23.02/landscape/lib/tests/test_backoff.py landscape-client-23.08/landscape/lib/tests/test_backoff.py --- landscape-client-23.02/landscape/lib/tests/test_backoff.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_backoff.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,11 +1,10 @@ -from landscape.lib.backoff import ExponentialBackoff from landscape.client.tests.helpers import LandscapeTest +from landscape.lib.backoff import ExponentialBackoff class TestBackoff(LandscapeTest): - def test_increase(self): - '''Test the delay values start correctly and double''' + """Test the delay values start correctly and double""" backoff_counter = ExponentialBackoff(5, 10) backoff_counter.increase() self.assertEqual(backoff_counter.get_delay(), 5) @@ -13,7 +12,7 @@ self.assertEqual(backoff_counter.get_delay(), 10) def test_min(self): - '''Test the count and the delay never go below zero''' + """Test the count and the delay never go below zero""" backoff_counter = ExponentialBackoff(1, 5) for _ in range(10): backoff_counter.decrease() @@ -22,14 +21,14 @@ self.assertEqual(backoff_counter._error_count, 0) def test_max(self): - '''Test the delay never goes above max''' + """Test the delay never goes above max""" backoff_counter = ExponentialBackoff(1, 5) for _ in range(10): backoff_counter.increase() self.assertEqual(backoff_counter.get_delay(), 5) def test_decreased_when_maxed(self): - '''Test the delay goes down one step when maxed''' + """Test the delay goes down one step when maxed""" backoff_counter = ExponentialBackoff(1, 5) for _ in range(10): backoff_counter.increase() diff -Nru landscape-client-23.02/landscape/lib/tests/test_bootstrap.py landscape-client-23.08/landscape/lib/tests/test_bootstrap.py --- landscape-client-23.02/landscape/lib/tests/test_bootstrap.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_bootstrap.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,11 +1,13 @@ import os import unittest - -from mock import patch, Mock +from unittest.mock import Mock +from unittest.mock import patch from landscape.lib import testing -from landscape.lib.bootstrap import ( - BootstrapPath, BootstrapFile, BootstrapDirectory, BootstrapList) +from landscape.lib.bootstrap import BootstrapDirectory +from landscape.lib.bootstrap import BootstrapFile +from landscape.lib.bootstrap import BootstrapList +from landscape.lib.bootstrap import BootstrapPath class BaseTestCase(testing.FSTestCase, unittest.TestCase): @@ -17,7 +19,7 @@ bootstrap_class = BootstrapPath def setUp(self): - super(BootstrapPathTest, self).setUp() + super().setUp() self.dirname = self.makeDir() self.path = os.path.join(self.dirname, "$my_var") self.real_path = os.path.join(self.dirname, "my_var_value") @@ -111,7 +113,6 @@ class BootstrapFileTest(BootstrapCreationTest): - def test_creation_wont_overwrite(self): filename = self.makeFile("CONTENT") file = self.bootstrap_class(filename) @@ -139,17 
+140,21 @@ class BootstrapListTest(BaseTestCase): - def test_creation(self): dirname = self.makeDir() - list = BootstrapList([BootstrapFile("$dirname/filename"), - BootstrapDirectory("$dirname/dirname"), - BootstrapFile("$dirname/dirname/filename")]) + list = BootstrapList( + [ + BootstrapFile("$dirname/filename"), + BootstrapDirectory("$dirname/dirname"), + BootstrapFile("$dirname/dirname/filename"), + ], + ) list.bootstrap(dirname=dirname) self.assertTrue(os.path.isfile(os.path.join(dirname, "filename"))) self.assertTrue(os.path.isdir(os.path.join(dirname, "dirname"))) - self.assertTrue(os.path.isfile(os.path.join(dirname, - "dirname/filename"))) + self.assertTrue( + os.path.isfile(os.path.join(dirname, "dirname/filename")), + ) diff -Nru landscape-client-23.02/landscape/lib/tests/test_bpickle.py landscape-client-23.08/landscape/lib/tests/test_bpickle.py --- landscape-client-23.02/landscape/lib/tests/test_bpickle.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_bpickle.py 2023-08-17 21:19:32.000000000 +0000 @@ -4,7 +4,6 @@ class BPickleTest(unittest.TestCase): - def test_int(self): self.assertEqual(bpickle.loads(bpickle.dumps(1)), 1) @@ -17,36 +16,35 @@ self.assertAlmostEquals(bpickle.loads(bpickle.dumps(number)), number) def test_bytes(self): - self.assertEqual(bpickle.loads(bpickle.dumps(b'foo')), b'foo') + self.assertEqual(bpickle.loads(bpickle.dumps(b"foo")), b"foo") def test_string(self): - self.assertEqual(bpickle.loads(bpickle.dumps('foo')), 'foo') + self.assertEqual(bpickle.loads(bpickle.dumps("foo")), "foo") def test_list(self): - self.assertEqual(bpickle.loads(bpickle.dumps([1, 2, 'hello', 3.0])), - [1, 2, 'hello', 3.0]) + self.assertEqual( + bpickle.loads(bpickle.dumps([1, 2, "hello", 3.0])), + [1, 2, "hello", 3.0], + ) def test_tuple(self): - data = bpickle.dumps((1, [], 2, 'hello', 3.0)) - self.assertEqual(bpickle.loads(data), - (1, [], 2, 'hello', 3.0)) + data = bpickle.dumps((1, [], 2, "hello", 3.0)) + self.assertEqual(bpickle.loads(data), (1, [], 2, "hello", 3.0)) def test_none(self): self.assertEqual(bpickle.loads(bpickle.dumps(None)), None) def test_unicode(self): - self.assertEqual(bpickle.loads(bpickle.dumps(u'\xc0')), u'\xc0') + self.assertEqual(bpickle.loads(bpickle.dumps("\xc0")), "\xc0") def test_bool(self): self.assertEqual(bpickle.loads(bpickle.dumps(True)), True) def test_dict(self): dumped_tostr = bpickle.dumps({True: "hello"}) - self.assertEqual(bpickle.loads(dumped_tostr), - {True: "hello"}) + self.assertEqual(bpickle.loads(dumped_tostr), {True: "hello"}) dumped_tobool = bpickle.dumps({True: False}) - self.assertEqual(bpickle.loads(dumped_tobool), - {True: False}) + self.assertEqual(bpickle.loads(dumped_tobool), {True: False}) def test_dict_bytes_keys(self): """Check loading dict bytes keys without reinterpreting.""" diff -Nru landscape-client-23.02/landscape/lib/tests/test_cloud.py landscape-client-23.08/landscape/lib/tests/test_cloud.py --- landscape-client-23.02/landscape/lib/tests/test_cloud.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_cloud.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,17 +1,24 @@ import unittest -from landscape.lib import testing -from landscape.lib.cloud import ( - EC2_API, _fetch_ec2_item, fetch_ec2_meta_data, MAX_LENGTH) -from landscape.lib.fetch import HTTPCodeError, PyCurlError -from twisted.internet.defer import succeed, fail +from twisted.internet.defer import fail +from twisted.internet.defer import succeed +from landscape.lib import testing 
+from landscape.lib.cloud import _fetch_ec2_item +from landscape.lib.cloud import EC2_API +from landscape.lib.cloud import fetch_ec2_meta_data +from landscape.lib.cloud import MAX_LENGTH +from landscape.lib.fetch import HTTPCodeError +from landscape.lib.fetch import PyCurlError -class CloudTest(testing.HelperTestCase, testing.TwistedTestCase, - unittest.TestCase): +class CloudTest( + testing.HelperTestCase, + testing.TwistedTestCase, + unittest.TestCase, +): def setUp(self): - super(CloudTest, self).setUp() + super().setUp() self.query_results = {} self.kwargs = {} @@ -34,7 +41,7 @@ self.fetch_func. C{value} must be bytes or an Error as the original fetch returns bytes. """ - url = "%s/meta-data/%s" % (EC2_API, name) + url = f"{EC2_API}/meta-data/{name}" self.query_results[url] = value def test_fetch_ec2_meta_data_error_on_any_item_error(self): @@ -52,13 +59,14 @@ if setup_item == item: self.add_query_result(item, error) else: - self.add_query_result(setup_item, "value%s" % setup_item) + self.add_query_result(setup_item, f"value{setup_item}") deferred = fetch_ec2_meta_data(fetch=self.fetch_func) failure = self.failureResultOf(deferred) self.assertEqual( "Server returned HTTP code 404", - failure.getErrorMessage()) + failure.getErrorMessage(), + ) def test_fetch_ec2_meta_data(self): """ @@ -68,10 +76,13 @@ deferred = fetch_ec2_meta_data(fetch=self.fetch_func) result = self.successResultOf(deferred) self.assertEqual( - {"ami-id": u"ami-00002", - "instance-id": u"i00001", - "instance-type": u"hs1.8xlarge"}, - result) + { + "ami-id": "ami-00002", + "instance-id": "i00001", + "instance-type": "hs1.8xlarge", + }, + result, + ) def test_fetch_ec2_meta_data_utf8(self): """ @@ -81,10 +92,14 @@ self.add_query_result("ami-id", b"asdf\xe1\x88\xb4") deferred = fetch_ec2_meta_data(fetch=self.fetch_func) result = self.successResultOf(deferred) - self.assertEqual({"instance-id": u"i00001", - "ami-id": u"asdf\u1234", - "instance-type": u"hs1.8xlarge"}, - result) + self.assertEqual( + { + "instance-id": "i00001", + "ami-id": "asdf\u1234", + "instance-type": "hs1.8xlarge", + }, + result, + ) def test_fetch_ec2_meta_data_truncates(self): """L{_fetch_ec2_meta_data} truncates values that are too long.""" @@ -94,10 +109,13 @@ deferred = fetch_ec2_meta_data(fetch=self.fetch_func) result = self.successResultOf(deferred) self.assertEqual( - {"ami-id": "a" * MAX_LENGTH, - "instance-id": "b" * MAX_LENGTH, - "instance-type": "c" * MAX_LENGTH}, - result) + { + "ami-id": "a" * MAX_LENGTH, + "instance-id": "b" * MAX_LENGTH, + "instance-type": "c" * MAX_LENGTH, + }, + result, + ) def test_wb_fetch_ec2_item_multiple_items_appends_accumulate_list(self): """ @@ -107,10 +125,15 @@ """ accumulate = [] self.successResultOf( - _fetch_ec2_item("instance-id", accumulate, fetch=self.fetch_func)) + _fetch_ec2_item("instance-id", accumulate, fetch=self.fetch_func), + ) self.successResultOf( _fetch_ec2_item( - "instance-type", accumulate, fetch=self.fetch_func)) + "instance-type", + accumulate, + fetch=self.fetch_func, + ), + ) self.assertEqual([b"i00001", b"hs1.8xlarge"], accumulate) def test_wb_fetch_ec2_item_error_returns_failure(self): @@ -122,7 +145,10 @@ self.add_query_result("other-id", PyCurlError(60, "pycurl error")) accumulate = [] deferred = _fetch_ec2_item( - "other-id", accumulate, fetch=self.fetch_func) + "other-id", + accumulate, + fetch=self.fetch_func, + ) failure = self.failureResultOf(deferred) self.assertEqual("Error 60: pycurl error", failure.getErrorMessage()) @@ -135,6 +161,9 @@ self.add_query_result("other-id", 
PyCurlError(60, "pycurl error")) accumulate = [] deferred = _fetch_ec2_item( - "other-id", accumulate, fetch=self.fetch_func) + "other-id", + accumulate, + fetch=self.fetch_func, + ) self.failureResultOf(deferred) self.assertEqual({"follow": False}, self.kwargs) diff -Nru landscape-client-23.02/landscape/lib/tests/test_config.py landscape-client-23.08/landscape/lib/tests/test_config.py --- landscape-client-23.02/landscape/lib/tests/test_config.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_config.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,17 +1,18 @@ -from optparse import OptionParser -from textwrap import dedent import io -import mock -import os import os.path import sys import unittest +from optparse import OptionParser +from textwrap import dedent +from unittest import mock -from landscape.lib.fs import read_text_file, create_text_file -from landscape.lib.testing import ( - HelperTestCase, ConfigTestCase, LogKeeperHelper) - -from landscape.lib.config import BaseConfiguration, get_bindir +from landscape.lib.config import BaseConfiguration +from landscape.lib.config import get_bindir +from landscape.lib.fs import create_text_file +from landscape.lib.fs import read_text_file +from landscape.lib.testing import ConfigTestCase +from landscape.lib.testing import HelperTestCase +from landscape.lib.testing import LogKeeperHelper class BabbleConfiguration(BaseConfiguration): @@ -19,7 +20,7 @@ default_config_filenames = [] def make_parser(self): - parser = super(BabbleConfiguration, self).make_parser() + parser = super().make_parser() parser.add_option("--whatever", metavar="STUFF") return parser @@ -30,7 +31,7 @@ default_config_filenames = [] def make_parser(self): - parser = super(MyConfiguration, self).make_parser() + parser = super().make_parser() for name, value in defaults.items(): name = name.replace("_", "-") parser.add_option("--" + name, default=value) @@ -44,7 +45,7 @@ helpers = [LogKeeperHelper] def setUp(self): - super(BaseConfigurationTest, self).setUp() + super().setUp() self.reset_config(cfg_class()) def reset_config(self, configuration_class): @@ -53,10 +54,14 @@ self.parser = self.config.make_parser() def write_config_file(self, **kwargs): - section_name = kwargs.pop("section_name", - self.config_class.config_section) - config = "\n".join(["[%s]" % (section_name,)] + - ["%s = %s" % pair for pair in kwargs.items()]) + section_name = kwargs.pop( + "section_name", + self.config_class.config_section, + ) + config = "\n".join( + [f"[{section_name}]"] + + [f"{key} = {value}" for key, value in kwargs.items()], + ) self.config_filename = self.makeFile(config) self.config.default_config_filenames[:] = [self.config_filename] @@ -129,32 +134,30 @@ default_filename2 = self.makeFile("") explicit_filename = self.makeFile("") loaded_filename = self.makeFile("") - self.config.default_config_filenames[:] = [default_filename1, - default_filename2] + self.config.default_config_filenames[:] = [ + default_filename1, + default_filename2, + ] # If nothing else is set, and the first configuration file # isn't readable, return the second default file. os.chmod(default_filename1, 0) - self.assertEqual(self.config.get_config_filename(), - default_filename2) + self.assertEqual(self.config.get_config_filename(), default_filename2) # If it is readable, than return the first default configuration file. 
os.chmod(default_filename1, 0o644) - self.assertEqual(self.config.get_config_filename(), - default_filename1) + self.assertEqual(self.config.get_config_filename(), default_filename1) # Unless another file was explicitly loaded before, in which # case return the loaded filename. self.config.load_configuration_file(loaded_filename) - self.assertEqual(self.config.get_config_filename(), - loaded_filename) + self.assertEqual(self.config.get_config_filename(), loaded_filename) # Except in the case where a configuration file was explicitly # requested through the command line or something. In this case, # this is the highest precedence. self.config.config = explicit_filename - self.assertEqual(self.config.get_config_filename(), - explicit_filename) + self.assertEqual(self.config.get_config_filename(), explicit_filename) # ConfigObj @@ -164,8 +167,10 @@ correct file and with its options set in the manor we expect. """ config_obj = self.config._get_config_object() - self.assertEqual(self.config.get_config_filename(), - config_obj.filename) + self.assertEqual( + self.config.get_config_filename(), + config_obj.filename, + ) self.assertFalse(config_obj.list_values) def test_get_config_object_with_alternative_config(self): @@ -175,7 +180,8 @@ get_config_filename. """ config_obj = self.config._get_config_object( - alternative_config=io.StringIO(u"[my-config]\nwhatever = error\n")) + alternative_config=io.StringIO("[my-config]\nwhatever = error\n"), + ) self.assertEqual(None, config_obj.filename) # CLI options @@ -228,9 +234,9 @@ self.assertIs(options.config, None) parser = BaseConfiguration().make_parser( - cfgfile="spam.conf", - datadir="/tmp/spam/data", - ) + cfgfile="spam.conf", + datadir="/tmp/spam/data", + ) options = parser.parse_args([])[0] self.assertEqual(options.config, "spam.conf") @@ -246,8 +252,7 @@ def test_write_to_given_config_file(self): self.reset_config(configuration_class=cfg_class(whatever="spam")) filename = self.makeFile(content="") - self.config.load(["--whatever", "eggs", - "--config", filename]) + self.config.load(["--whatever", "eggs", "--config", filename]) self.config.whatever = "ham" self.config.write() data = read_text_file(filename) @@ -257,8 +262,9 @@ def test_data_directory_option(self): """Ensure options.data_path option can be read by parse_args.""" - options = self.parser.parse_args(["--data-path", - "/opt/hoojy/var/run"])[0] + options = self.parser.parse_args( + ["--data-path", "/opt/hoojy/var/run"], + )[0] self.assertEqual(options.data_path, "/opt/hoojy/var/run") def test_data_directory_default(self): @@ -267,9 +273,9 @@ self.assertIs(options.data_path, None) parser = BaseConfiguration().make_parser( - cfgfile="spam.conf", - datadir="/tmp/spam/data", - ) + cfgfile="spam.conf", + datadir="/tmp/spam/data", + ) options = parser.parse_args([])[0] self.assertEqual(options.data_path, "/tmp/spam/data") @@ -291,9 +297,10 @@ Ensure config option of type int shows up in self.config when config.load is called. """ + class MyConfiguration(self.config_class): def make_parser(self): - parser = super(MyConfiguration, self).make_parser() + parser = super().make_parser() parser.add_option("--year", default=1, type="int") return parser @@ -307,9 +314,10 @@ Ensure command line config option of type int shows up in self.config when config.load is called. 
""" + class MyConfiguration(self.config_class): def make_parser(self): - parser = super(MyConfiguration, self).make_parser() + parser = super().make_parser() parser.add_option("--year", default=1, type="int") return parser @@ -319,9 +327,7 @@ self.assertEqual(config.year, 2008) def test_load_no_section_available(self): - self.config_class.default_config_filenames = ( - self.makeFile(""), - ) + self.config_class.default_config_filenames = (self.makeFile(""),) self.reset_config(self.config_class) self.config.load([]) @@ -330,12 +336,16 @@ Values that appear after the point in a configuration file where a parsing error occurs are correctly parsed. """ - filename = self.makeFile(dedent(""" + filename = self.makeFile( + dedent( + """ [my-config] computer_title = frog computer_title = flag whatever = spam - """)) + """, + ), + ) self.config.load_configuration_file(filename) self.assertEqual(self.config.whatever, "spam") self.assertIn( @@ -348,11 +358,13 @@ Duplicate keys in the config file shouldn't result in a fatal error, but the first defined value should be used. """ - config = dedent(""" + config = dedent( + """ [my-config] computer_title = frog computer_title = flag - """) + """, + ) filename = self.makeFile(config) self.config.load_configuration_file(filename) self.assertEqual(self.config.computer_title, "frog") @@ -366,12 +378,14 @@ Triplicate keys in the config file shouldn't result in a fatal error, but the first defined value should be used. """ - config = dedent(""" + config = dedent( + """ [my-config] computer_title = frog computer_title = flag computer_title = flop - """) + """, + ) filename = self.makeFile(config) self.config.load_configuration_file(filename) self.assertEqual(self.config.computer_title, "frog") @@ -390,6 +404,7 @@ C{config.load} doesn't exit the process if the default config file is not found and C{accept_nonexistent_default_config} is C{True}. """ + class MyConfiguration(BaseConfiguration): default_config_filenames = ["/not/here"] @@ -402,16 +417,21 @@ C{config.load} exits the process if the specified config file is not found and C{accept_nonexistent_default_config} is C{True}. 
""" + class MyConfiguration(BaseConfiguration): default_config_filenames = [] config = MyConfiguration() filename = "/not/here" with self.assertRaises(SystemExit) as cm: - config.load(["--config", filename], - accept_nonexistent_default_config=True) - self.assertEqual(str(cm.exception), - "error: config file %s can't be read" % filename) + config.load( + ["--config", filename], + accept_nonexistent_default_config=True, + ) + self.assertEqual( + str(cm.exception), + f"error: config file {filename} can't be read", + ) def test_load_cannot_read(self): """ @@ -422,8 +442,10 @@ os.chmod(filename, 0) with self.assertRaises(SystemExit) as cm: self.config.load(["--config", filename]) - self.assertEqual(str(cm.exception), - "error: config file %s can't be read" % filename) + self.assertEqual( + str(cm.exception), + f"error: config file {filename} can't be read", + ) def test_load_not_found(self): """ @@ -433,8 +455,10 @@ filename = "/not/here" with self.assertRaises(SystemExit) as cm: self.config.load(["--config", filename]) - self.assertEqual(str(cm.exception), - "error: config file %s can't be read" % filename) + self.assertEqual( + str(cm.exception), + f"error: config file {filename} can't be read", + ) def test_load_cannot_read_default(self): """ @@ -446,8 +470,10 @@ os.chmod(default, 0) with self.assertRaises(SystemExit) as cm: self.config.load([]) - self.assertEqual(str(cm.exception), - "error: config file %s can't be read" % default) + self.assertEqual( + str(cm.exception), + f"error: config file {default} can't be read", + ) def test_load_not_found_default(self): """ @@ -457,8 +483,10 @@ [default] = self.config.default_config_filenames[:] = ["/not/here"] with self.assertRaises(SystemExit) as cm: self.config.load([]) - self.assertEqual(str(cm.exception), - "error: config file %s can't be read" % default) + self.assertEqual( + str(cm.exception), + f"error: config file {default} can't be read", + ) def test_load_cannot_read_many_defaults(self): """ @@ -473,8 +501,10 @@ with self.assertRaises(SystemExit) as cm: self.config.load([]) - self.assertEqual(str(cm.exception), - "error: no config file could be read") + self.assertEqual( + str(cm.exception), + "error: no config file could be read", + ) # saving @@ -497,14 +527,20 @@ def test_write_existing_file(self): self.config_filename = self.makeFile( - "\n[other]\nfoo = bar\n[again]\nx = y\n") + "\n[other]\nfoo = bar\n[again]\nx = y\n", + ) self.config.default_config_filenames[:] = [self.config_filename] self.config.whatever = "eggs" self.config.write() data = read_text_file(self.config_filename) - self.assertConfigEqual(data, ("[other]\nfoo = bar\n" - "[again]\nx = y\n" - "[my-config]\nwhatever = eggs")) + self.assertConfigEqual( + data, + ( + "[other]\nfoo = bar\n" + "[again]\nx = y\n" + "[my-config]\nwhatever = eggs" + ), + ) def test_write_existing_section(self): self.write_config_file() @@ -534,7 +570,8 @@ data = read_text_file(config_filename) self.assertConfigEqual( data, - "[my-config]\nwhatever = boo\n\n[goojy]\nunrelated = yes") + "[my-config]\nwhatever = boo\n\n[goojy]\nunrelated = yes", + ) def test_write_on_the_right_default_config_file(self): self.write_config_file(whatever="spam") @@ -648,7 +685,8 @@ new_config = read_text_file(filename) self.assertConfigEqual( new_config, - "[my-config]\n# Comment 1\nwhatever = eggs\n#Comment 2\n") + "[my-config]\n# Comment 1\nwhatever = eggs\n#Comment 2\n", + ) class GetBindirTest(unittest.TestCase): @@ -663,8 +701,8 @@ self.assertEqual("/spam/eggs", bindir) - def test_config_has_None_bindir(self): 
- """get_bindir() """ + def test_config_has_None_bindir(self): # noqa: N802 + """get_bindir()""" cfg = BaseConfiguration() cfg.bindir = None bindir = get_bindir(cfg) @@ -672,14 +710,14 @@ self.assertEqual(self.BIN_DIR, bindir) def test_config_has_no_bindir(self): - """get_bindir() """ + """get_bindir()""" cfg = object() bindir = get_bindir(cfg) self.assertEqual(self.BIN_DIR, bindir) - def test_config_is_None(self): - """get_bindir() """ + def test_config_is_None(self): # noqa: N802 + """get_bindir()""" bindir = get_bindir(None) self.assertEqual(self.BIN_DIR, bindir) diff -Nru landscape-client-23.02/landscape/lib/tests/test_disk.py landscape-client-23.08/landscape/lib/tests/test_disk.py --- landscape-client-23.02/landscape/lib/tests/test_disk.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_disk.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,12 @@ import os import unittest - -from mock import patch +from unittest.mock import patch from landscape.lib import testing -from landscape.lib.disk import ( - get_filesystem_for_path, get_mount_info, is_device_removable, - _get_device_removable_file_path) +from landscape.lib.disk import _get_device_removable_file_path +from landscape.lib.disk import get_filesystem_for_path +from landscape.lib.disk import get_mount_info +from landscape.lib.disk import is_device_removable class BaseTestCase(testing.FSTestCase, unittest.TestCase): @@ -14,9 +14,8 @@ class DiskUtilitiesTest(BaseTestCase): - def setUp(self): - super(DiskUtilitiesTest, self).setUp() + super().setUp() self.mount_file = self.makeFile("") self.stat_results = {} @@ -31,7 +30,7 @@ else: raise OSError("Permission denied") - def set_mount_points(self, points, read_access=True, fs='ext4'): + def set_mount_points(self, points, read_access=True, fs="ext4"): """ This method prepares a fake mounts file containing the mount points specified in the C{points} list of strings. This file @@ -41,14 +40,17 @@ yield a permission denied error when inspected. 
""" self.read_access = read_access - content = "\n".join("/dev/sda%d %s %s rw 0 0" % (i, point, fs) - for i, point in enumerate(points)) + content = "\n".join( + f"/dev/sda{i:d} {point} {fs} rw 0 0" + for i, point in enumerate(points) + ) f = open(self.mount_file, "w") f.write(content) f.close() for point in points: self.stat_results[point] = os.statvfs_result( - (4096, 0, 1000, 500, 0, 0, 0, 0, 0, 0)) + (4096, 0, 1000, 500, 0, 0, 0, 0, 0, 0), + ) def test_get_filesystem_for_path(self): self.set_mount_points(["/"]) @@ -58,7 +60,8 @@ def test_get_filesystem_subpath(self): self.set_mount_points(["/"]) self.stat_results["/"] = os.statvfs_result( - (4096, 0, 1000, 500, 0, 0, 0, 0, 0, 0)) + (4096, 0, 1000, 500, 0, 0, 0, 0, 0, 0), + ) info = get_filesystem_for_path("/home", self.mount_file, self.statvfs) self.assertEqual(info["mount-point"], "/") @@ -73,7 +76,7 @@ self.assertEqual(info["mount-point"], "/") def test_get_filesystem_weird_fs(self): - self.set_mount_points(["/"], fs='simfs') + self.set_mount_points(["/"], fs="simfs") info = get_filesystem_for_path("/home", self.mount_file, self.statvfs) self.assertEqual(info["mount-point"], "/") @@ -82,8 +85,11 @@ os.symlink("/foo/bar", symlink_path) self.addCleanup(os.remove, symlink_path) self.set_mount_points(["/", "/foo"]) - info = get_filesystem_for_path(symlink_path, - self.mount_file, self.statvfs) + info = get_filesystem_for_path( + symlink_path, + self.mount_file, + self.statvfs, + ) self.assertEqual(info["mount-point"], "/foo") def test_ignore_unreadable_mount_point(self): @@ -93,7 +99,10 @@ """ self.set_mount_points(["/secret"], read_access=False) info = get_filesystem_for_path( - "/secret", self.mount_file, self.statvfs) + "/secret", + self.mount_file, + self.statvfs, + ) self.assertIs(info, None) def test_ignore_unmounted_and_virtual_mountpoints(self): @@ -102,26 +111,35 @@ ensure non-regression on bug #1045374. 
""" self.read_access = True - content = "\n".join(["auto_direct /opt/whatever autofs", - "none /run/lock tmpfs", - "proc /proc proc", - "/dev/sda1 /home ext4"]) + content = "\n".join( + [ + "auto_direct /opt/whatever autofs", + "none /run/lock tmpfs", + "proc /proc proc", + "/dev/sda1 /home ext4", + ], + ) f = open(self.mount_file, "w") f.write(content) f.close() self.stat_results["/home"] = os.statvfs_result( - (4096, 0, 1000, 500, 0, 0, 0, 0, 0, 0)) + (4096, 0, 1000, 500, 0, 0, 0, 0, 0, 0), + ) result = [x for x in get_mount_info(self.mount_file, self.statvfs)] - expected = {"device": "/dev/sda1", "mount-point": "/home", - "filesystem": "ext4", "total-space": 3, "free-space": 1} + expected = { + "device": "/dev/sda1", + "mount-point": "/home", + "filesystem": "ext4", + "total-space": 3, + "free-space": 1, + } self.assertEqual([expected], result) class RemovableDiskTest(BaseTestCase): - @patch("os.path.islink") def test_wb_get_device_removable_file_path(self, is_link_mock): """ diff -Nru landscape-client-23.02/landscape/lib/tests/test_encoding.py landscape-client-23.08/landscape/lib/tests/test_encoding.py --- landscape-client-23.02/landscape/lib/tests/test_encoding.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_encoding.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,15 +1,14 @@ -# -*- coding: utf-8 -*- import codecs import unittest -from landscape.lib.encoding import encode_if_needed, encode_values +from landscape.lib.encoding import encode_if_needed +from landscape.lib.encoding import encode_values -EXPECTED_UTF8 = codecs.encode(u"請不要刪除", "utf-8") +EXPECTED_UTF8 = codecs.encode("請不要刪除", "utf-8") class EncodingTest(unittest.TestCase): - def test_encode_if_needed_utf_string(self): """ When passed an utf-8 str() instance the encode_if_needed function @@ -25,9 +24,9 @@ the encode_if_needed function returns the utf-16 str() equivalent (in utf-8). """ - value = u"Alex \U0001f603" + value = "Alex \U0001f603" result = encode_if_needed(value) - expected = b'Alex \xf0\x9f\x98\x83' + expected = b"Alex \xf0\x9f\x98\x83" self.assertEqual(expected, result) def test_encode_if_needed_utf_unicode(self): @@ -35,7 +34,7 @@ When passed an unicode instance that is a decode()'d unicode, the encode_if_needed function returns the utf-8 str() equivalent. """ - value = u'\u8acb\u4e0d\u8981\u522a\u9664' + value = "\u8acb\u4e0d\u8981\u522a\u9664" result = encode_if_needed(value) self.assertEqual(EXPECTED_UTF8, result) @@ -44,7 +43,7 @@ When passed an encoded() unicode instance, the encode_if_needed function returns the utf-8 str() equivalent. """ - value = u"請不要刪除" + value = "請不要刪除" result = encode_if_needed(value) self.assertEqual(EXPECTED_UTF8, result) @@ -58,7 +57,9 @@ """ When passed in a dictionary, all unicode is encoded and bytes are left. 
""" - original = {"a": b"Alex \xf0\x9f\x98\x83", "b": u"Alex \U0001f603"} - expected = {"a": b"Alex \xf0\x9f\x98\x83", - "b": b"Alex \xf0\x9f\x98\x83"} + original = {"a": b"Alex \xf0\x9f\x98\x83", "b": "Alex \U0001f603"} + expected = { + "a": b"Alex \xf0\x9f\x98\x83", + "b": b"Alex \xf0\x9f\x98\x83", + } self.assertEqual(expected, encode_values(original)) diff -Nru landscape-client-23.02/landscape/lib/tests/test_fd.py landscape-client-23.08/landscape/lib/tests/test_fd.py --- landscape-client-23.02/landscape/lib/tests/test_fd.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_fd.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,9 +1,8 @@ """Tests for L{landscape.lib.fd}""" - -import unittest - import resource -from mock import patch, call +import unittest +from unittest.mock import call +from unittest.mock import patch from landscape.lib.fd import clean_fds @@ -47,7 +46,7 @@ self.assertEqual(closed_fds, expected_fds) - def test_ignore_OSErrors(self): + def test_ignore_OSErrors(self): # noqa: N802 """ If os.close raises an OSError, it is ignored and we continue to close the rest of the FDs. diff -Nru landscape-client-23.02/landscape/lib/tests/test_fetch.py landscape-client-23.08/landscape/lib/tests/test_fetch.py --- landscape-client-23.02/landscape/lib/tests/test_fetch.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_fetch.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,20 +1,22 @@ import os -from threading import local import unittest +from threading import local import pycurl - from twisted.internet.defer import FirstError from twisted.python.compat import unicode from landscape.lib import testing -from landscape.lib.fetch import ( - fetch, fetch_async, fetch_many_async, fetch_to_files, - url_to_filename, HTTPCodeError, PyCurlError) +from landscape.lib.fetch import fetch +from landscape.lib.fetch import fetch_async +from landscape.lib.fetch import fetch_many_async +from landscape.lib.fetch import fetch_to_files +from landscape.lib.fetch import HTTPCodeError +from landscape.lib.fetch import PyCurlError +from landscape.lib.fetch import url_to_filename -class CurlStub(object): - +class CurlStub: def __init__(self, result=None, infos=None, error=None): self.result = result self.infos = infos @@ -27,7 +29,7 @@ def getinfo(self, what): if what in self.infos: return self.infos[what] - raise RuntimeError("Stub doesn't know about %d info" % what) + raise RuntimeError(f"Stub doesn't know about {what:d} info") def setopt(self, option, value): if isinstance(value, unicode): @@ -45,8 +47,7 @@ self.performed = True -class CurlManyStub(object): - +class CurlManyStub: def __init__(self, url_results): self.curls = {} for url in url_results: @@ -58,7 +59,9 @@ body = result[0] http_code = result[1] self.curls[url] = CurlStub( - result=body, infos={pycurl.HTTP_CODE: http_code}) + result=body, + infos={pycurl.HTTP_CODE: http_code}, + ) # Use thread local storage to keep the current CurlStub since # CurlManyStub is passed to multiple threads, but the state needs to be @@ -79,91 +82,108 @@ # want to keep it like this. But when we set the value for pycurl # option we already have the encoded bytes object, that's why we # have to decode again. 
- self._local.current = self.curls[value.decode('ascii')] + self._local.current = self.curls[value.decode("ascii")] self._local.current.setopt(option, value) def perform(self): self._local.current.perform() -class Any(object): - +class Any: def __eq__(self, other): return True -class FetchTest(testing.FSTestCase, testing.TwistedTestCase, - unittest.TestCase): - +class FetchTest( + testing.FSTestCase, + testing.TwistedTestCase, + unittest.TestCase, +): def test_basic(self): curl = CurlStub(b"result") result = fetch("http://example.com", curl=curl) self.assertEqual(result, b"result") - self.assertEqual(curl.options, - {pycurl.URL: b"http://example.com", - pycurl.FOLLOWLOCATION: 1, - pycurl.MAXREDIRS: 5, - pycurl.CONNECTTIMEOUT: 30, - pycurl.LOW_SPEED_LIMIT: 1, - pycurl.LOW_SPEED_TIME: 600, - pycurl.NOSIGNAL: 1, - pycurl.WRITEFUNCTION: Any(), - pycurl.DNS_CACHE_TIMEOUT: 0, - pycurl.ENCODING: b"gzip,deflate"}) + self.assertEqual( + curl.options, + { + pycurl.URL: b"http://example.com", + pycurl.FOLLOWLOCATION: 1, + pycurl.MAXREDIRS: 5, + pycurl.CONNECTTIMEOUT: 30, + pycurl.LOW_SPEED_LIMIT: 1, + pycurl.LOW_SPEED_TIME: 600, + pycurl.NOSIGNAL: 1, + pycurl.WRITEFUNCTION: Any(), + pycurl.DNS_CACHE_TIMEOUT: 0, + pycurl.ENCODING: b"gzip,deflate", + }, + ) def test_post(self): curl = CurlStub(b"result") result = fetch("http://example.com", post=True, curl=curl) self.assertEqual(result, b"result") - self.assertEqual(curl.options, - {pycurl.URL: b"http://example.com", - pycurl.FOLLOWLOCATION: 1, - pycurl.MAXREDIRS: 5, - pycurl.CONNECTTIMEOUT: 30, - pycurl.LOW_SPEED_LIMIT: 1, - pycurl.LOW_SPEED_TIME: 600, - pycurl.NOSIGNAL: 1, - pycurl.WRITEFUNCTION: Any(), - pycurl.POST: True, - pycurl.DNS_CACHE_TIMEOUT: 0, - pycurl.ENCODING: b"gzip,deflate"}) + self.assertEqual( + curl.options, + { + pycurl.URL: b"http://example.com", + pycurl.FOLLOWLOCATION: 1, + pycurl.MAXREDIRS: 5, + pycurl.CONNECTTIMEOUT: 30, + pycurl.LOW_SPEED_LIMIT: 1, + pycurl.LOW_SPEED_TIME: 600, + pycurl.NOSIGNAL: 1, + pycurl.WRITEFUNCTION: Any(), + pycurl.POST: True, + pycurl.DNS_CACHE_TIMEOUT: 0, + pycurl.ENCODING: b"gzip,deflate", + }, + ) def test_post_data(self): curl = CurlStub(b"result") result = fetch("http://example.com", post=True, data="data", curl=curl) self.assertEqual(result, b"result") self.assertEqual(curl.options[pycurl.READFUNCTION](), b"data") - self.assertEqual(curl.options, - {pycurl.URL: b"http://example.com", - pycurl.FOLLOWLOCATION: 1, - pycurl.MAXREDIRS: 5, - pycurl.CONNECTTIMEOUT: 30, - pycurl.LOW_SPEED_LIMIT: 1, - pycurl.LOW_SPEED_TIME: 600, - pycurl.NOSIGNAL: 1, - pycurl.WRITEFUNCTION: Any(), - pycurl.POST: True, - pycurl.POSTFIELDSIZE: 4, - pycurl.READFUNCTION: Any(), - pycurl.DNS_CACHE_TIMEOUT: 0, - pycurl.ENCODING: b"gzip,deflate"}) + self.assertEqual( + curl.options, + { + pycurl.URL: b"http://example.com", + pycurl.FOLLOWLOCATION: 1, + pycurl.MAXREDIRS: 5, + pycurl.CONNECTTIMEOUT: 30, + pycurl.LOW_SPEED_LIMIT: 1, + pycurl.LOW_SPEED_TIME: 600, + pycurl.NOSIGNAL: 1, + pycurl.WRITEFUNCTION: Any(), + pycurl.POST: True, + pycurl.POSTFIELDSIZE: 4, + pycurl.READFUNCTION: Any(), + pycurl.DNS_CACHE_TIMEOUT: 0, + pycurl.ENCODING: b"gzip,deflate", + }, + ) def test_cainfo(self): curl = CurlStub(b"result") result = fetch("https://example.com", cainfo="cainfo", curl=curl) self.assertEqual(result, b"result") - self.assertEqual(curl.options, - {pycurl.URL: b"https://example.com", - pycurl.FOLLOWLOCATION: 1, - pycurl.MAXREDIRS: 5, - pycurl.CONNECTTIMEOUT: 30, - pycurl.LOW_SPEED_LIMIT: 1, - pycurl.LOW_SPEED_TIME: 600, - 
pycurl.NOSIGNAL: 1, - pycurl.WRITEFUNCTION: Any(), - pycurl.CAINFO: b"cainfo", - pycurl.DNS_CACHE_TIMEOUT: 0, - pycurl.ENCODING: b"gzip,deflate"}) + self.assertEqual( + curl.options, + { + pycurl.URL: b"https://example.com", + pycurl.FOLLOWLOCATION: 1, + pycurl.MAXREDIRS: 5, + pycurl.CONNECTTIMEOUT: 30, + pycurl.LOW_SPEED_LIMIT: 1, + pycurl.LOW_SPEED_TIME: 600, + pycurl.NOSIGNAL: 1, + pycurl.WRITEFUNCTION: Any(), + pycurl.CAINFO: b"cainfo", + pycurl.DNS_CACHE_TIMEOUT: 0, + pycurl.ENCODING: b"gzip,deflate", + }, + ) def test_cainfo_on_http(self): curl = CurlStub(b"result") @@ -173,38 +193,53 @@ def test_headers(self): curl = CurlStub(b"result") - result = fetch("http://example.com", - headers={"a": "1", "b": "2"}, curl=curl) - self.assertEqual(result, b"result") - self.assertEqual(curl.options, - {pycurl.URL: b"http://example.com", - pycurl.FOLLOWLOCATION: 1, - pycurl.MAXREDIRS: 5, - pycurl.CONNECTTIMEOUT: 30, - pycurl.LOW_SPEED_LIMIT: 1, - pycurl.LOW_SPEED_TIME: 600, - pycurl.NOSIGNAL: 1, - pycurl.WRITEFUNCTION: Any(), - pycurl.HTTPHEADER: ["a: 1", "b: 2"], - pycurl.DNS_CACHE_TIMEOUT: 0, - pycurl.ENCODING: b"gzip,deflate"}) + result = fetch( + "http://example.com", + headers={"a": "1", "b": "2"}, + curl=curl, + ) + self.assertEqual(result, b"result") + self.assertEqual( + curl.options, + { + pycurl.URL: b"http://example.com", + pycurl.FOLLOWLOCATION: 1, + pycurl.MAXREDIRS: 5, + pycurl.CONNECTTIMEOUT: 30, + pycurl.LOW_SPEED_LIMIT: 1, + pycurl.LOW_SPEED_TIME: 600, + pycurl.NOSIGNAL: 1, + pycurl.WRITEFUNCTION: Any(), + pycurl.HTTPHEADER: ["a: 1", "b: 2"], + pycurl.DNS_CACHE_TIMEOUT: 0, + pycurl.ENCODING: b"gzip,deflate", + }, + ) def test_timeouts(self): curl = CurlStub(b"result") - result = fetch("http://example.com", connect_timeout=5, - total_timeout=30, curl=curl) - self.assertEqual(result, b"result") - self.assertEqual(curl.options, - {pycurl.URL: b"http://example.com", - pycurl.FOLLOWLOCATION: 1, - pycurl.MAXREDIRS: 5, - pycurl.CONNECTTIMEOUT: 5, - pycurl.LOW_SPEED_LIMIT: 1, - pycurl.LOW_SPEED_TIME: 30, - pycurl.NOSIGNAL: 1, - pycurl.WRITEFUNCTION: Any(), - pycurl.DNS_CACHE_TIMEOUT: 0, - pycurl.ENCODING: b"gzip,deflate"}) + result = fetch( + "http://example.com", + connect_timeout=5, + total_timeout=30, + curl=curl, + ) + self.assertEqual(result, b"result") + self.assertEqual( + curl.options, + { + pycurl.URL: b"http://example.com", + pycurl.FOLLOWLOCATION: 1, + pycurl.MAXREDIRS: 5, + pycurl.CONNECTTIMEOUT: 5, + pycurl.LOW_SPEED_LIMIT: 1, + pycurl.LOW_SPEED_TIME: 30, + pycurl.NOSIGNAL: 1, + pycurl.WRITEFUNCTION: Any(), + pycurl.DNS_CACHE_TIMEOUT: 0, + pycurl.ENCODING: b"gzip,deflate", + }, + ) def test_unicode(self): """ @@ -212,7 +247,7 @@ passing it to curl. 
""" curl = CurlStub(b"result") - result = fetch(u"http://example.com", curl=curl) + result = fetch("http://example.com", curl=curl) self.assertEqual(result, b"result") self.assertEqual(curl.options[pycurl.URL], b"http://example.com") self.assertTrue(isinstance(curl.options[pycurl.URL], bytes)) @@ -228,12 +263,16 @@ self.fail("HTTPCodeError not raised") def test_http_error_str(self): - self.assertEqual(str(HTTPCodeError(501, "")), - "Server returned HTTP code 501") + self.assertEqual( + str(HTTPCodeError(501, "")), + "Server returned HTTP code 501", + ) def test_http_error_repr(self): - self.assertEqual(repr(HTTPCodeError(501, "")), - "") + self.assertEqual( + repr(HTTPCodeError(501, "")), + "", + ) def test_pycurl_error(self): curl = CurlStub(error=pycurl.error(60, "pycurl error")) @@ -247,41 +286,50 @@ def test_pycurl_insecure(self): curl = CurlStub(b"result") - result = fetch("http://example.com/get-ca-cert", curl=curl, - insecure=True) - self.assertEqual(result, b"result") - self.assertEqual(curl.options, - {pycurl.URL: b"http://example.com/get-ca-cert", - pycurl.FOLLOWLOCATION: 1, - pycurl.MAXREDIRS: 5, - pycurl.CONNECTTIMEOUT: 30, - pycurl.LOW_SPEED_LIMIT: 1, - pycurl.LOW_SPEED_TIME: 600, - pycurl.NOSIGNAL: 1, - pycurl.WRITEFUNCTION: Any(), - pycurl.SSL_VERIFYPEER: False, - pycurl.DNS_CACHE_TIMEOUT: 0, - pycurl.ENCODING: b"gzip,deflate"}) + result = fetch( + "http://example.com/get-ca-cert", + curl=curl, + insecure=True, + ) + self.assertEqual(result, b"result") + self.assertEqual( + curl.options, + { + pycurl.URL: b"http://example.com/get-ca-cert", + pycurl.FOLLOWLOCATION: 1, + pycurl.MAXREDIRS: 5, + pycurl.CONNECTTIMEOUT: 30, + pycurl.LOW_SPEED_LIMIT: 1, + pycurl.LOW_SPEED_TIME: 600, + pycurl.NOSIGNAL: 1, + pycurl.WRITEFUNCTION: Any(), + pycurl.SSL_VERIFYPEER: False, + pycurl.DNS_CACHE_TIMEOUT: 0, + pycurl.ENCODING: b"gzip,deflate", + }, + ) def test_pycurl_error_str(self): - self.assertEqual(str(PyCurlError(60, "pycurl error")), - "Error 60: pycurl error") + self.assertEqual( + str(PyCurlError(60, "pycurl error")), + "Error 60: pycurl error", + ) def test_pycurl_error_repr(self): - self.assertEqual(repr(PyCurlError(60, "pycurl error")), - "") + self.assertEqual( + repr(PyCurlError(60, "pycurl error")), + "", + ) def test_pycurl_follow_true(self): curl = CurlStub(b"result") - result = fetch("http://example.com", curl=curl, - follow=True) + result = fetch("http://example.com", curl=curl, follow=True) self.assertEqual(result, b"result") self.assertEqual(1, curl.options[pycurl.FOLLOWLOCATION]) def test_pycurl_follow_false(self): curl = CurlStub(b"result") - result = fetch("http://example.com", curl=curl, - follow=False) + result = fetch("http://example.com", curl=curl, follow=False) self.assertEqual(result, b"result") self.assertNotIn(pycurl.FOLLOWLOCATION, curl.options.keys()) @@ -289,7 +337,10 @@ """If provided, the user-agent is set in the request.""" curl = CurlStub(b"result") result = fetch( - "http://example.com", curl=curl, user_agent="user-agent") + "http://example.com", + curl=curl, + user_agent="user-agent", + ) self.assertEqual(result, b"result") self.assertEqual(b"user-agent", curl.options[pycurl.USERAGENT]) @@ -299,34 +350,35 @@ proxy = "http://my.little.proxy" result = fetch("http://example.com", curl=curl, proxy=proxy) self.assertEqual(b"result", result) - self.assertEqual(proxy.encode('ascii'), curl.options[pycurl.PROXY]) + self.assertEqual(proxy.encode("ascii"), curl.options[pycurl.PROXY]) def test_create_curl(self): curls = [] - def pycurl_Curl(): + def pycurl_curl(): 
curl = CurlStub(b"result") curls.append(curl) return curl - Curl = pycurl.Curl - try: - pycurl.Curl = pycurl_Curl - result = fetch("http://example.com") - curl = curls[0] - self.assertEqual(result, b"result") - self.assertEqual(curl.options, - {pycurl.URL: b"http://example.com", - pycurl.FOLLOWLOCATION: 1, - pycurl.MAXREDIRS: 5, - pycurl.CONNECTTIMEOUT: 30, - pycurl.LOW_SPEED_LIMIT: 1, - pycurl.LOW_SPEED_TIME: 600, - pycurl.NOSIGNAL: 1, - pycurl.WRITEFUNCTION: Any(), - pycurl.DNS_CACHE_TIMEOUT: 0, - pycurl.ENCODING: b"gzip,deflate"}) - finally: - pycurl.Curl = Curl + + pycurl.Curl = pycurl_curl + result = fetch("http://example.com") + curl = curls[0] + self.assertEqual(result, b"result") + self.assertEqual( + curl.options, + { + pycurl.URL: b"http://example.com", + pycurl.FOLLOWLOCATION: 1, + pycurl.MAXREDIRS: 5, + pycurl.CONNECTTIMEOUT: 30, + pycurl.LOW_SPEED_LIMIT: 1, + pycurl.LOW_SPEED_TIME: 600, + pycurl.NOSIGNAL: 1, + pycurl.WRITEFUNCTION: Any(), + pycurl.DNS_CACHE_TIMEOUT: 0, + pycurl.ENCODING: b"gzip,deflate", + }, + ) def test_async_fetch(self): curl = CurlStub(b"result") @@ -334,6 +386,7 @@ def got_result(result): self.assertEqual(result, b"result") + return d.addCallback(got_result) def test_async_fetch_with_error(self): @@ -344,6 +397,7 @@ self.assertEqual(failure.value.http_code, 501) self.assertEqual(failure.value.body, b"result") return failure + d.addErrback(got_error) self.assertFailure(d, HTTPCodeError) return d @@ -354,8 +408,7 @@ C{DeferredList} firing its callback when all the URLs have successfully completed. """ - url_results = {"http://good/": b"good", - "http://better/": b"better"} + url_results = {"http://good/": b"good", "http://better/": b"better"} def callback(result, url): self.assertIn(result, url_results.values()) @@ -366,8 +419,12 @@ self.fail() curl = CurlManyStub(url_results) - d = fetch_many_async(url_results.keys(), callback=callback, - errback=errback, curl=curl) + d = fetch_many_async( + url_results.keys(), + callback=callback, + errback=errback, + curl=curl, + ) def completed(result): self.assertEqual(url_results, {}) @@ -378,9 +435,11 @@ """ L{fetch_many_async} aborts as soon as one URL fails. """ - url_results = {"http://right/": b"right", - "http://wrong/": (b"wrong", 501), - "http://impossible/": b"impossible"} + url_results = { + "http://right/": b"right", + "http://wrong/": (b"wrong", 501), + "http://impossible/": b"impossible", + } failed_urls = [] def errback(failure, url): @@ -391,12 +450,17 @@ curl = CurlManyStub(url_results) urls = ["http://right/", "http://wrong/", "http://impossible/"] - result = fetch_many_async(urls, callback=None, - errback=errback, curl=curl) + result = fetch_many_async( + urls, + callback=None, + errback=errback, + curl=curl, + ) def check_failure(failure): - self.assertTrue(isinstance(failure.subFailure.value, - HTTPCodeError)) + self.assertTrue( + isinstance(failure.subFailure.value, HTTPCodeError), + ) self.assertEqual(failed_urls, ["http://wrong/"]) self.assertFailure(result, FirstError) @@ -409,16 +473,20 @@ """ self.assertEqual(url_to_filename("http://some/file"), "file") self.assertEqual(url_to_filename("http://some/file/"), "file") - self.assertEqual(url_to_filename("http://some/file", directory="dir"), - os.path.join("dir", "file")) + self.assertEqual( + url_to_filename("http://some/file", directory="dir"), + os.path.join("dir", "file"), + ) def test_fetch_to_files(self): """ L{fetch_to_files} fetches a list of URLs and save their content in the given directory. 
""" - url_results = {"http://good/file": b"file", - "http://even/better-file": b"better-file"} + url_results = { + "http://good/file": b"file", + "http://even/better-file": b"better-file", + } directory = self.makeDir() curl = CurlManyStub(url_results) @@ -427,7 +495,7 @@ def check_files(ignored): for url, result in url_results.items(): filename = url.rstrip("/").split("/")[-1] - fd = open(os.path.join(directory, filename), 'rb') + fd = open(os.path.join(directory, filename), "rb") self.assertEqual(fd.read(), result) fd.close() @@ -455,22 +523,33 @@ L{fetch_to_files} optionally logs an error message as soon as one URL fails, and aborts. """ - url_results = {"http://im/right": b"right", - "http://im/wrong": (b"wrong", 404), - "http://im/not": b"not"} + url_results = { + "http://im/right": b"right", + "http://im/wrong": (b"wrong", 404), + "http://im/not": b"not", + } directory = self.makeDir() messages = [] - logger = (lambda message: messages.append(message)) + + def logger(message): + return messages.append(message) + curl = CurlManyStub(url_results) - result = fetch_to_files(url_results.keys(), directory, logger=logger, - curl=curl) + result = fetch_to_files( + url_results.keys(), + directory, + logger=logger, + curl=curl, + ) def check_messages(failure): self.assertEqual(len(messages), 1) - self.assertEqual(messages[0], - "Couldn't fetch file from http://im/wrong " - "(Server returned HTTP code 404)") + self.assertEqual( + messages[0], + "Couldn't fetch file from http://im/wrong " + "(Server returned HTTP code 404)", + ) messages.pop() def check_files(ignored): @@ -494,9 +573,13 @@ def check_error(failure): error = str(failure.value.subFailure.value) - self.assertEqual(error, - ("[Errno 2] No such file or directory: " - "'i/dont/exist/right'")) + self.assertEqual( + error, + ( + "[Errno 2] No such file or directory: " + "'i/dont/exist/right'" + ), + ) self.assertFalse(os.path.exists(os.path.join(directory, "right"))) result.addErrback(check_error) diff -Nru landscape-client-23.02/landscape/lib/tests/test_format.py landscape-client-23.08/landscape/lib/tests/test_format.py --- landscape-client-23.02/landscape/lib/tests/test_format.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_format.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,6 +1,8 @@ import unittest -from landscape.lib.format import format_object, format_delta, format_percent +from landscape.lib.format import format_delta +from landscape.lib.format import format_object +from landscape.lib.format import format_percent def function(): @@ -8,29 +10,32 @@ class FormatObjectTest(unittest.TestCase): - def test_format_instance(self): - self.assertEqual(format_object(self), - "landscape.lib.tests.test_format.FormatObjectTest") + self.assertEqual( + format_object(self), + "landscape.lib.tests.test_format.FormatObjectTest", + ) def method(self): pass def test_format_method(self): - self.assertEqual(format_object(self.method), - ("landscape.lib.tests.test_format" - ".FormatObjectTest.method()")) + self.assertEqual( + format_object(self.method), + ("landscape.lib.tests.test_format" ".FormatObjectTest.method()"), + ) def test_format_function(self): - self.assertEqual(format_object(function), - "landscape.lib.tests.test_format.function()") + self.assertEqual( + format_object(function), + "landscape.lib.tests.test_format.function()", + ) # FIXME Write tests to make sure that inner functions render # usefully. 
class FormatDeltaTest(unittest.TestCase): - def test_format_float(self): self.assertEqual(format_delta(0.0), "0.00s") self.assertEqual(format_delta(47.16374), "47.16s") @@ -46,7 +51,6 @@ class FormatPercentTest(unittest.TestCase): - def test_format_float(self): self.assertEqual(format_percent(0.0), "0.00%") self.assertEqual(format_percent(47.16374), "47.16%") diff -Nru landscape-client-23.02/landscape/lib/tests/test_fs.py landscape-client-23.08/landscape/lib/tests/test_fs.py --- landscape-client-23.02/landscape/lib/tests/test_fs.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_fs.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,15 +1,17 @@ -# -*- coding: utf-8 -*- import codecs import os -from mock import patch import time import unittest +from unittest.mock import patch from twisted.python.compat import long from landscape.lib import testing -from landscape.lib.fs import append_text_file, append_binary_file, touch_file -from landscape.lib.fs import read_text_file, read_binary_file +from landscape.lib.fs import append_binary_file +from landscape.lib.fs import append_text_file +from landscape.lib.fs import read_binary_file +from landscape.lib.fs import read_text_file +from landscape.lib.fs import touch_file class BaseTestCase(testing.FSTestCase, unittest.TestCase): @@ -17,7 +19,6 @@ class ReadFileTest(BaseTestCase): - def test_read_binary_file(self): """ With no options L{read_binary_file} reads the whole file passed as @@ -49,61 +50,64 @@ """ path = self.makeFile("foo bar from end") self.assertEqual( - read_binary_file(path, limit=100), b"foo bar from end") + read_binary_file(path, limit=100), + b"foo bar from end", + ) self.assertEqual( - read_binary_file(path, limit=-100), b"foo bar from end") + read_binary_file(path, limit=-100), + b"foo bar from end", + ) def test_read_text_file(self): """ With no options L{read_text_file} reads the whole file passed as argument as string decoded with utf-8. """ - utf8_content = codecs.encode(u"foo \N{SNOWMAN}", "utf-8") + utf8_content = codecs.encode("foo \N{SNOWMAN}", "utf-8") path = self.makeFile(utf8_content, mode="wb") - self.assertEqual(read_text_file(path), u"foo ☃") + self.assertEqual(read_text_file(path), "foo ☃") def test_read_text_file_with_limit(self): """ With a positive limit L{read_text_file} returns up to L{limit} characters from the start of the file. """ - utf8_content = codecs.encode(u"foo \N{SNOWMAN}", "utf-8") + utf8_content = codecs.encode("foo \N{SNOWMAN}", "utf-8") path = self.makeFile(utf8_content, mode="wb") - self.assertEqual(read_text_file(path, limit=3), u"foo") + self.assertEqual(read_text_file(path, limit=3), "foo") def test_read_text_file_with_negative_limit(self): """ With a negative limit L{read_text_file} reads only the tail characters of the string. """ - utf8_content = codecs.encode(u"foo \N{SNOWMAN} bar", "utf-8") + utf8_content = codecs.encode("foo \N{SNOWMAN} bar", "utf-8") path = self.makeFile(utf8_content, mode="wb") - self.assertEqual(read_text_file(path, limit=-5), u"☃ bar") + self.assertEqual(read_text_file(path, limit=-5), "☃ bar") def test_read_text_file_with_limit_bigger_than_file(self): """ If the limit is bigger than the file L{read_text_file} reads the entire file. 
""" - utf8_content = codecs.encode(u"foo \N{SNOWMAN} bar", "utf-8") + utf8_content = codecs.encode("foo \N{SNOWMAN} bar", "utf-8") path = self.makeFile(utf8_content, mode="wb") - self.assertEqual(read_text_file(path, limit=100), u"foo ☃ bar") - self.assertEqual(read_text_file(path, limit=-100), u"foo ☃ bar") + self.assertEqual(read_text_file(path, limit=100), "foo ☃ bar") + self.assertEqual(read_text_file(path, limit=-100), "foo ☃ bar") def test_read_text_file_with_broken_utf8(self): """ A text file containing broken UTF-8 shouldn't cause an error, just return some sensible replacement chars. """ - not_quite_utf8_content = b'foo \xca\xff bar' - path = self.makeFile(not_quite_utf8_content, mode='wb') - self.assertEqual(read_text_file(path), u'foo \ufffd\ufffd bar') - self.assertEqual(read_text_file(path, limit=5), u'foo \ufffd') - self.assertEqual(read_text_file(path, limit=-3), u'bar') + not_quite_utf8_content = b"foo \xca\xff bar" + path = self.makeFile(not_quite_utf8_content, mode="wb") + self.assertEqual(read_text_file(path), "foo \ufffd\ufffd bar") + self.assertEqual(read_text_file(path, limit=5), "foo \ufffd") + self.assertEqual(read_text_file(path, limit=-3), "bar") class TouchFileTest(BaseTestCase): - @patch("os.utime") def test_touch_file(self, utime_mock): """ @@ -134,25 +138,29 @@ expected_time = current_time - 1 with patch.object( - time, "time", return_value=current_time) as time_mock: + time, + "time", + return_value=current_time, + ) as time_mock: with patch.object(os, "utime") as utime_mock: touch_file(path, offset_seconds=-1) time_mock.assert_called_once_with() utime_mock.assert_called_once_with( - path, (expected_time, expected_time)) + path, + (expected_time, expected_time), + ) self.assertFileContent(path, b"") class AppendFileTest(BaseTestCase): - def test_append_existing_text_file(self): """ The L{append_text_file} function appends contents to an existing file. """ existing_file = self.makeFile("foo bar") - append_text_file(existing_file, u" baz ☃") + append_text_file(existing_file, " baz ☃") self.assertFileContent(existing_file, b"foo bar baz \xe2\x98\x83") def test_append_text_no_file(self): @@ -161,7 +169,7 @@ exist already. """ new_file = os.path.join(self.makeDir(), "new_file") - append_text_file(new_file, u"contents ☃") + append_text_file(new_file, "contents ☃") self.assertFileContent(new_file, b"contents \xe2\x98\x83") def test_append_existing_binary_file(self): diff -Nru landscape-client-23.02/landscape/lib/tests/test_gpg.py landscape-client-23.08/landscape/lib/tests/test_gpg.py --- landscape-client-23.02/landscape/lib/tests/test_gpg.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_gpg.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,28 +1,30 @@ -import mock import os import textwrap import unittest +from unittest import mock from twisted.internet import reactor -from twisted.internet.defer import Deferred, inlineCallbacks +from twisted.internet.defer import Deferred +from twisted.internet.defer import inlineCallbacks from landscape.lib import testing from landscape.lib.gpg import gpg_verify class GPGTest(testing.FSTestCase, testing.TwistedTestCase, unittest.TestCase): - def test_gpg_verify(self): """ L{gpg_verify} runs the given gpg binary and returns C{True} if the provided signature is valid. 
""" aptdir = self.makeDir() - os.mknod("{}/trusted.gpg".format(aptdir)) + os.mknod(f"{aptdir}/trusted.gpg") gpg_options = self.makeFile() - gpg = self.makeFile("#!/bin/sh\n" - "touch $3/trustdb.gpg\n" - "echo -n $@ > %s\n" % gpg_options) + gpg = self.makeFile( + "#!/bin/sh\n" + "touch $3/trustdb.gpg\n" + f"echo -n $@ > {gpg_options}\n", + ) os.chmod(gpg, 0o755) gpg_home = self.makeDir() deferred = Deferred() @@ -31,14 +33,19 @@ def do_test(mkdtemp_mock): mkdtemp_mock.return_value = gpg_home result = gpg_verify( - "/some/file", "/some/signature", gpg=gpg, apt_dir=aptdir) + "/some/file", + "/some/signature", + gpg=gpg, + apt_dir=aptdir, + ) def check_result(ignored): self.assertEqual( open(gpg_options).read(), - "--no-options --homedir %s --no-default-keyring " - "--ignore-time-conflict --keyring %s/trusted.gpg " - "--verify /some/signature /some/file" % (gpg_home, aptdir)) + f"--no-options --homedir {gpg_home} --no-default-keyring " + f"--ignore-time-conflict --keyring {aptdir}/trusted.gpg " + "--verify /some/signature /some/file", + ) self.assertFalse(os.path.exists(gpg_home)) result.addCallback(check_result) @@ -63,9 +70,10 @@ result = gpg_verify("/some/file", "/some/signature", gpg=gpg) def check_failure(failure): - self.assertEqual(str(failure.value), - "%s failed (out='out\n', err='err\n', " - "code='1')" % gpg) + self.assertEqual( + str(failure.value), + f"{gpg} failed (out='out\n', err='err\n', " "code='1')", + ) self.assertFalse(os.path.exists(gpg_home)) result.addCallback(self.fail) @@ -81,23 +89,31 @@ gpg_verify uses keys from the trusted.gpg.d if such a folder exists. """ apt_dir = self.makeDir() - os.mkdir("{}/trusted.gpg.d".format(apt_dir)) - os.mknod("{}/trusted.gpg.d/foo.gpg".format(apt_dir)) - os.mknod("{}/trusted.gpg.d/baz.gpg".format(apt_dir)) - os.mknod("{}/trusted.gpg.d/bad.gpg~".format(apt_dir)) + os.mkdir(f"{apt_dir}/trusted.gpg.d") + os.mknod(f"{apt_dir}/trusted.gpg.d/foo.gpg") + os.mknod(f"{apt_dir}/trusted.gpg.d/baz.gpg") + os.mknod(f"{apt_dir}/trusted.gpg.d/bad.gpg~") gpg_call = self.makeFile() - fake_gpg = self.makeFile(textwrap.dedent("""\ + fake_gpg = self.makeFile( + textwrap.dedent( + """\ #!/bin/sh touch $3/trustdb.gpg echo -n $@ > {} - """).format(gpg_call)) + """, + ).format(gpg_call), + ) os.chmod(fake_gpg, 0o755) gpg_home = self.makeDir() with mock.patch("tempfile.mkdtemp", return_value=gpg_home): yield gpg_verify( - "/some/file", "/some/signature", gpg=fake_gpg, apt_dir=apt_dir) + "/some/file", + "/some/signature", + gpg=fake_gpg, + apt_dir=apt_dir, + ) expected = ( "--no-options --homedir {gpg_home} --no-default-keyring " diff -Nru landscape-client-23.02/landscape/lib/tests/test_juju.py landscape-client-23.08/landscape/lib/tests/test_juju.py --- landscape-client-23.02/landscape/lib/tests/test_juju.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_juju.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,20 +1,28 @@ -from collections import namedtuple import json import unittest +from collections import namedtuple from landscape.lib import testing from landscape.lib.juju import get_juju_info -SAMPLE_JUJU_INFO = json.dumps({"environment-uuid": "DEAD-BEEF", - "machine-id": "1", - "api-addresses": "10.0.3.1:17070", - "private-address": "127.0.0.1"}) - -SAMPLE_JUJU_INFO_2 = json.dumps({"environment-uuid": "DEAD-BEEF", - "machine-id": "1", - "api-addresses": "10.0.3.2:17070", - "private-address": "127.0.0.1"}) +SAMPLE_JUJU_INFO = json.dumps( + { + "environment-uuid": "DEAD-BEEF", + "machine-id": "1", + "api-addresses": 
"10.0.3.1:17070", + "private-address": "127.0.0.1", + }, +) + +SAMPLE_JUJU_INFO_2 = json.dumps( + { + "environment-uuid": "DEAD-BEEF", + "machine-id": "1", + "api-addresses": "10.0.3.2:17070", + "private-address": "127.0.0.1", + }, +) class JujuTest(testing.HelperTestCase, testing.FSTestCase, unittest.TestCase): @@ -22,12 +30,11 @@ Config = namedtuple("Config", ["juju_filename"]) def setUp(self): - super(JujuTest, self).setUp() + super().setUp() self.stub_config = self.Config(self.makeFile()) def _create_tmp_juju_file(self, contents): - return self.makeFile( - contents, path=self.stub_config.juju_filename) + return self.makeFile(contents, path=self.stub_config.juju_filename) def test_get_juju_info(self): """ @@ -36,10 +43,14 @@ self._create_tmp_juju_file(SAMPLE_JUJU_INFO) juju_info = get_juju_info(self.stub_config) self.assertEqual( - {u"environment-uuid": "DEAD-BEEF", - u"machine-id": "1", - u"private-address": "127.0.0.1", - u"api-addresses": ["10.0.3.1:17070"]}, juju_info) + { + "environment-uuid": "DEAD-BEEF", + "machine-id": "1", + "private-address": "127.0.0.1", + "api-addresses": ["10.0.3.1:17070"], + }, + juju_info, + ) def test_get_juju_info_empty_file(self): """ @@ -60,16 +71,23 @@ def test_get_juju_info_multiple_endpoints(self): """L{get_juju_info} turns space separated API addresses into a list.""" - juju_multiple_endpoints = json.dumps({ - "environment-uuid": "DEAD-BEEF", - "machine-id": "0", - "api-addresses": "10.0.3.1:17070 10.0.3.2:18080", - "private-address": "127.0.0.1"}) + juju_multiple_endpoints = json.dumps( + { + "environment-uuid": "DEAD-BEEF", + "machine-id": "0", + "api-addresses": "10.0.3.1:17070 10.0.3.2:18080", + "private-address": "127.0.0.1", + }, + ) self._create_tmp_juju_file(juju_multiple_endpoints) juju_info = get_juju_info(self.stub_config) self.assertEqual( - {"environment-uuid": "DEAD-BEEF", - "machine-id": "0", - "api-addresses": ["10.0.3.1:17070", "10.0.3.2:18080"], - "private-address": "127.0.0.1"}, juju_info) + { + "environment-uuid": "DEAD-BEEF", + "machine-id": "0", + "api-addresses": ["10.0.3.1:17070", "10.0.3.2:18080"], + "private-address": "127.0.0.1", + }, + juju_info, + ) diff -Nru landscape-client-23.02/landscape/lib/tests/test_lock.py landscape-client-23.08/landscape/lib/tests/test_lock.py --- landscape-client-23.02/landscape/lib/tests/test_lock.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_lock.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,15 +1,15 @@ -import time import os +import time import unittest from landscape.lib import testing -from landscape.lib.lock import lock_path, LockError +from landscape.lib.lock import lock_path +from landscape.lib.lock import LockError class LockTest(testing.FSTestCase, unittest.TestCase): - def setUp(self): - super(LockTest, self).setUp() + super().setUp() self.filename = self.makeFile() def test_lock_creates_path(self): diff -Nru landscape-client-23.02/landscape/lib/tests/test_logging.py landscape-client-23.08/landscape/lib/tests/test_logging.py --- landscape-client-23.02/landscape/lib/tests/test_logging.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_logging.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,24 +1,36 @@ -from __future__ import absolute_import - import logging import unittest -from landscape.lib.testing import FSTestCase +from landscape.lib.logging import init_app_logging +from landscape.lib.logging import LoggingAttributeError from landscape.lib.logging import rotate_logs +from landscape.lib.testing 
import FSTestCase class RotateLogsTest(FSTestCase, unittest.TestCase): - def test_log_rotation(self): logging.getLogger().addHandler(logging.FileHandler(self.makeFile())) # Store the initial set of handlers - original_streams = [handler.stream for handler in - logging.getLogger().handlers if - isinstance(handler, logging.FileHandler)] + original_streams = [ + handler.stream + for handler in logging.getLogger().handlers + if isinstance(handler, logging.FileHandler) + ] rotate_logs() - new_streams = [handler.stream for handler in - logging.getLogger().handlers if - isinstance(handler, logging.FileHandler)] + new_streams = [ + handler.stream + for handler in logging.getLogger().handlers + if isinstance(handler, logging.FileHandler) + ] for stream in new_streams: self.assertTrue(stream not in original_streams) + + def test_wrong_log_level(self): + tmpdir = self.makeDir() + with self.assertRaises(LoggingAttributeError) as exp: + init_app_logging(tmpdir, "'INFO'") + self.assertTrue( + "Unknown level \"'INFO'\", conversion to " + "logging code was \"Level 'INFO'\"" in str(exp.exception), + ) diff -Nru landscape-client-23.02/landscape/lib/tests/test_lsb_release.py landscape-client-23.08/landscape/lib/tests/test_lsb_release.py --- landscape-client-23.02/landscape/lib/tests/test_lsb_release.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_lsb_release.py 2023-08-17 21:19:32.000000000 +0000 @@ -7,59 +7,73 @@ class LsbReleaseTest(testing.FSTestCase, unittest.TestCase): - def test_parse_lsb_release(self): with mock.patch("landscape.lib.lsb_release.check_output") as co_mock: - co_mock.return_value = (b"Ubuntu\nUbuntu 22.04.1 LTS\n22.04\njammy" - b"\n") + co_mock.return_value = ( + b"Ubuntu\nUbuntu 22.04.1 LTS\n22.04\njammy\n" + ) lsb_release = parse_lsb_release() - self.assertEqual(lsb_release, - {"distributor-id": "Ubuntu", - "description": "Ubuntu 22.04.1 LTS", - "release": "22.04", - "code-name": "jammy"}) + self.assertEqual( + lsb_release, + { + "distributor-id": "Ubuntu", + "description": "Ubuntu 22.04.1 LTS", + "release": "22.04", + "code-name": "jammy", + }, + ) def test_parse_lsb_release_debian(self): with mock.patch("landscape.lib.lsb_release.check_output") as co_mock: - co_mock.return_value = (b"Debian\nDebian GNU/Linux 11 (bullseye)\n" - b"11\nbullseye\n") + co_mock.return_value = ( + b"Debian\nDebian GNU/Linux 11 (bullseye)\n11\nbullseye\n" + ) lsb_release = parse_lsb_release() - self.assertEqual(lsb_release, - {"distributor-id": "Debian", - "description": "Debian GNU/Linux 11 (bullseye)", - "release": "11", - "code-name": "bullseye"}) + self.assertEqual( + lsb_release, + { + "distributor-id": "Debian", + "description": "Debian GNU/Linux 11 (bullseye)", + "release": "11", + "code-name": "bullseye", + }, + ) def test_parse_lsb_release_file(self): """ L{parse_lsb_release} returns a C{dict} holding information from the given LSB release file. 
""" - lsb_release_filename = self.makeFile("DISTRIB_ID=Ubuntu\n" - "DISTRIB_RELEASE=6.06\n" - "DISTRIB_CODENAME=dapper\n" - "DISTRIB_DESCRIPTION=" - "\"Ubuntu 6.06.1 LTS\"\n") + lsb_release_filename = self.makeFile( + "DISTRIB_ID=Ubuntu\n" + "DISTRIB_RELEASE=6.06\n" + "DISTRIB_CODENAME=dapper\n" + "DISTRIB_DESCRIPTION=" + '"Ubuntu 6.06.1 LTS"\n', + ) with mock.patch("landscape.lib.lsb_release.check_output") as co_mock: co_mock.side_effect = CalledProcessError(127, "") lsb_release = parse_lsb_release(lsb_release_filename) - self.assertEqual(lsb_release, - {"distributor-id": "Ubuntu", - "description": "Ubuntu 6.06.1 LTS", - "release": "6.06", - "code-name": "dapper"}) + self.assertEqual( + lsb_release, + { + "distributor-id": "Ubuntu", + "description": "Ubuntu 6.06.1 LTS", + "release": "6.06", + "code-name": "dapper", + }, + ) def test_parse_lsb_release_file_with_missing_or_extra_fields(self): """ L{parse_lsb_release} ignores lines not matching the map of known keys, and returns only keys with an actual value. """ - lsb_release_filename = self.makeFile("DISTRIB_ID=Ubuntu\n" - "FOO=Bar\n") + lsb_release_filename = self.makeFile("DISTRIB_ID=Ubuntu\nFOO=Bar\n") with mock.patch("landscape.lib.lsb_release.check_output") as co_mock: co_mock.side_effect = CalledProcessError(127, "") @@ -71,5 +85,8 @@ with mock.patch("landscape.lib.lsb_release.check_output") as co_mock: co_mock.side_effect = CalledProcessError(127, "") - self.assertRaises(FileNotFoundError, parse_lsb_release, - "TheresNoWayThisFileExists") + self.assertRaises( + FileNotFoundError, + parse_lsb_release, + "TheresNoWayThisFileExists", + ) diff -Nru landscape-client-23.02/landscape/lib/tests/test_monitor.py landscape-client-23.08/landscape/lib/tests/test_monitor.py --- landscape-client-23.02/landscape/lib/tests/test_monitor.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_monitor.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,20 +1,22 @@ import unittest from landscape.lib import testing -from landscape.lib.monitor import ( - Timer, Monitor, BurstMonitor, CoverageMonitor, FrequencyMonitor) +from landscape.lib.monitor import BurstMonitor +from landscape.lib.monitor import CoverageMonitor +from landscape.lib.monitor import FrequencyMonitor +from landscape.lib.monitor import Monitor +from landscape.lib.monitor import Timer class ReactorHavingTest(testing.HelperTestCase, unittest.TestCase): def setUp(self): - super(ReactorHavingTest, self).setUp() + super().setUp() self.reactor = testing.FakeReactor() class TimerTest(ReactorHavingTest): - def setUp(self): - super(TimerTest, self).setUp() + super().setUp() self.timer = Timer(create_time=self.reactor.time) def test_since_start(self): @@ -39,9 +41,8 @@ class MonitorTest(ReactorHavingTest): - def setUp(self): - super(MonitorTest, self).setUp() + super().setUp() self.monitor = Monitor("test", create_time=self.reactor.time) def test_ping(self): @@ -70,16 +71,21 @@ self.monitor.ping() self.reactor.advance(1) self.monitor.log() - self.assertTrue("INFO: 100 test events occurred in the last 100.00s." - in self.logfile.getvalue()) + self.assertTrue( + "INFO: 100 test events occurred in the last 100.00s." 
+ in self.logfile.getvalue(), + ) class BurstMonitorTest(ReactorHavingTest): - def setUp(self): - super(BurstMonitorTest, self).setUp() - self.monitor = BurstMonitor(60, 1, "test", - create_time=self.reactor.time) + super().setUp() + self.monitor = BurstMonitor( + 60, + 1, + "test", + create_time=self.reactor.time, + ) def test_warn_no_pings(self): self.assertFalse(self.monitor.warn()) @@ -148,11 +154,14 @@ class CoverageMonitorTest(ReactorHavingTest): - def setUp(self): - super(CoverageMonitorTest, self).setUp() - self.monitor = CoverageMonitor(1, 1.0, "test", - create_time=self.reactor.time) + super().setUp() + self.monitor = CoverageMonitor( + 1, + 1.0, + "test", + create_time=self.reactor.time, + ) def test_warn(self): self.monitor.ping() @@ -177,8 +186,12 @@ If time < interval has passed and the monitor has received some pings, it should still return 100%. """ - monitor = CoverageMonitor(10, 1.0, "test", - create_time=self.reactor.time) + monitor = CoverageMonitor( + 10, + 1.0, + "test", + create_time=self.reactor.time, + ) monitor.reset() self.reactor.advance(1) monitor.ping() @@ -221,25 +234,30 @@ self.monitor.ping() self.reactor.advance(1) self.monitor.log() - self.assertTrue("INFO: 100 of 100 expected test events (100.00%) " - "occurred in the last 100.00s." - in self.logfile.getvalue()) + self.assertTrue( + "INFO: 100 of 100 expected test events (100.00%) " + "occurred in the last 100.00s." in self.logfile.getvalue(), + ) def test_log_warning(self): for i in range(100): self.reactor.advance(1) self.monitor.log() - self.assertTrue("WARNING: 0 of 100 expected test events (0.00%) " - "occurred in the last 100.00s." - in self.logfile.getvalue()) + self.assertTrue( + "WARNING: 0 of 100 expected test events (0.00%) " + "occurred in the last 100.00s." in self.logfile.getvalue(), + ) class FrequencyMonitorTest(ReactorHavingTest): - def setUp(self): - super(FrequencyMonitorTest, self).setUp() - self.monitor = FrequencyMonitor(100, 1, "test", - create_time=self.reactor.time) + super().setUp() + self.monitor = FrequencyMonitor( + 100, + 1, + "test", + create_time=self.reactor.time, + ) def test_expected_count(self): self.assertEqual(self.monitor.expected_count, 0) @@ -270,10 +288,12 @@ self.monitor.ping() self.reactor.advance(100) self.monitor.log() - self.assertTrue("minimum expected test events" - not in self.logfile.getvalue()) + self.assertTrue( + "minimum expected test events" not in self.logfile.getvalue(), + ) self.reactor.advance(1) self.monitor.log() - self.assertTrue("WARNING: Only 0 of 1 minimum expected test events " - "occurred in the last 100.00s." - in self.logfile.getvalue()) + self.assertTrue( + "WARNING: Only 0 of 1 minimum expected test events " + "occurred in the last 100.00s." 
in self.logfile.getvalue(), + ) diff -Nru landscape-client-23.02/landscape/lib/tests/test_network.py landscape-client-23.08/landscape/lib/tests/test_network.py --- landscape-client-23.02/landscape/lib/tests/test_network.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_network.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,21 +1,26 @@ import socket import unittest - -from unittest.mock import ANY, DEFAULT, patch, mock_open -from netifaces import ( - AF_INET, - AF_INET6, - AF_LINK, - AF_UNIX, - ifaddresses as _ifaddresses, - interfaces as _interfaces, -) -from subprocess import Popen, PIPE +from subprocess import PIPE +from subprocess import Popen +from unittest.mock import ANY +from unittest.mock import DEFAULT +from unittest.mock import mock_open +from unittest.mock import patch + +from netifaces import AF_INET +from netifaces import AF_INET6 +from netifaces import AF_LINK +from netifaces import AF_UNIX +from netifaces import ifaddresses as _ifaddresses +from netifaces import interfaces as _interfaces from landscape.lib import testing -from landscape.lib.network import ( - get_active_device_info, get_filtered_if_info, get_fqdn, - get_network_interface_speed, get_network_traffic, is_up) +from landscape.lib.network import get_active_device_info +from landscape.lib.network import get_filtered_if_info +from landscape.lib.network import get_fqdn +from landscape.lib.network import get_network_interface_speed +from landscape.lib.network import get_network_traffic +from landscape.lib.network import is_up class BaseTestCase(testing.HelperTestCase, unittest.TestCase): @@ -23,7 +28,6 @@ class NetworkInfoTest(BaseTestCase): - @patch("landscape.lib.network.get_network_interface_speed") def test_get_active_device_info(self, mock_get_network_interface_speed): """ @@ -36,9 +40,10 @@ device_info = get_active_device_info(extended=False) process = Popen(["/sbin/ifconfig"], stdout=PIPE, env={"LC_ALL": "C"}) result = process.communicate()[0].decode("ascii") - interface_blocks = dict( - [(block.split()[0].strip(":"), block.upper()) for block in - filter(None, result.split("\n\n"))]) + interface_blocks = { + block.split()[0].strip(":"): block.upper() + for block in filter(None, result.split("\n\n")) + } for device in device_info: if device["mac_address"] == "00:00:00:00:00:00": @@ -68,56 +73,77 @@ @patch("landscape.lib.network.get_flags") @patch("landscape.lib.network.netifaces.ifaddresses") @patch("landscape.lib.network.netifaces.interfaces") - def test_extended_info(self, mock_interfaces, mock_ifaddresses, - mock_get_flags, mock_get_network_interface_speed): + def test_extended_info( + self, + mock_interfaces, + mock_ifaddresses, + mock_get_flags, + mock_get_network_interface_speed, + ): mock_get_network_interface_speed.return_value = (100, True) mock_get_flags.return_value = 4163 mock_interfaces.return_value = ["test_iface"] mock_ifaddresses.return_value = { AF_LINK: [ {"addr": "aa:bb:cc:dd:ee:f0"}, - {"addr": "aa:bb:cc:dd:ee:f1"}], + {"addr": "aa:bb:cc:dd:ee:f1"}, + ], AF_INET: [ {"addr": "192.168.0.50", "netmask": "255.255.255.0"}, - {"addr": "192.168.1.50", "netmask": "255.255.255.0"}], - AF_INET6: [ - {"addr": "2001::1"}, - {"addr": "2002::2"}]} + {"addr": "192.168.1.50", "netmask": "255.255.255.0"}, + ], + AF_INET6: [{"addr": "2001::1"}, {"addr": "2002::2"}], + } device_info = get_active_device_info(extended=True) self.assertEqual( device_info, - [{ - 'interface': 'test_iface', - 'ip_address': '192.168.0.50', - 'mac_address': 'aa:bb:cc:dd:ee:f0', - 
'broadcast_address': '0.0.0.0', - 'netmask': '255.255.255.0', - 'ip_addresses': { - AF_INET: [ - {'addr': '192.168.0.50', 'netmask': '255.255.255.0'}, - {'addr': '192.168.1.50', 'netmask': '255.255.255.0'}], - AF_INET6: [ - {"addr": "2001::1"}, - {"addr": "2002::2"}]}, - 'flags': 4163, - 'speed': 100, - 'duplex': True}]) + [ + { + "interface": "test_iface", + "ip_address": "192.168.0.50", + "mac_address": "aa:bb:cc:dd:ee:f0", + "broadcast_address": "0.0.0.0", + "netmask": "255.255.255.0", + "ip_addresses": { + AF_INET: [ + { + "addr": "192.168.0.50", + "netmask": "255.255.255.0", + }, + { + "addr": "192.168.1.50", + "netmask": "255.255.255.0", + }, + ], + AF_INET6: [{"addr": "2001::1"}, {"addr": "2002::2"}], + }, + "flags": 4163, + "speed": 100, + "duplex": True, + }, + ], + ) @patch("landscape.lib.network.get_network_interface_speed") @patch("landscape.lib.network.get_flags") @patch("landscape.lib.network.netifaces.ifaddresses") @patch("landscape.lib.network.netifaces.interfaces") def test_skip_ipv6_only_in_non_extended_mode( - self, mock_interfaces, mock_ifaddresses, mock_get_flags, - mock_get_network_interface_speed): + self, + mock_interfaces, + mock_ifaddresses, + mock_get_flags, + mock_get_network_interface_speed, + ): mock_get_network_interface_speed.return_value = (100, True) mock_get_flags.return_value = 4163 mock_interfaces.return_value = ["test_iface"] mock_ifaddresses.return_value = { AF_LINK: [{"addr": "aa:bb:cc:dd:ee:f0"}], - AF_INET6: [{"addr": "2001::1"}]} + AF_INET6: [{"addr": "2001::1"}], + } device_info = get_active_device_info(extended=False) @@ -128,25 +154,34 @@ @patch("landscape.lib.network.netifaces.ifaddresses") @patch("landscape.lib.network.netifaces.interfaces") def test_ipv6_only_in_extended_mode( - self, mock_interfaces, mock_ifaddresses, mock_get_flags, - mock_get_network_interface_speed): + self, + mock_interfaces, + mock_ifaddresses, + mock_get_flags, + mock_get_network_interface_speed, + ): mock_get_network_interface_speed.return_value = (100, True) mock_get_flags.return_value = 4163 mock_interfaces.return_value = ["test_iface"] mock_ifaddresses.return_value = { AF_LINK: [{"addr": "aa:bb:cc:dd:ee:f0"}], - AF_INET6: [{"addr": "2001::1"}]} + AF_INET6: [{"addr": "2001::1"}], + } device_info = get_active_device_info(extended=True) self.assertEqual( device_info, - [{ - 'interface': 'test_iface', - 'flags': 4163, - 'speed': 100, - 'duplex': True, - 'ip_addresses': {AF_INET6: [{'addr': '2001::1'}]}}]) + [ + { + "interface": "test_iface", + "flags": 4163, + "speed": 100, + "duplex": True, + "ip_addresses": {AF_INET6: [{"addr": "2001::1"}]}, + }, + ], + ) @patch("landscape.lib.network.get_default_interfaces") @patch("landscape.lib.network.get_network_interface_speed") @@ -154,8 +189,13 @@ @patch("landscape.lib.network.netifaces.ifaddresses") @patch("landscape.lib.network.netifaces.interfaces") def test_default_only_interface( - self, mock_interfaces, mock_ifaddresses, mock_get_flags, - mock_get_network_interface_speed, mock_get_default_interfaces): + self, + mock_interfaces, + mock_ifaddresses, + mock_get_flags, + mock_get_network_interface_speed, + mock_get_default_interfaces, + ): default_iface = "test_iface" mock_get_default_interfaces.return_value = [default_iface] mock_get_network_interface_speed.return_value = (100, True) @@ -176,8 +216,7 @@ @patch("landscape.lib.network.netifaces.interfaces") def test_skip_vlan(self, mock_interfaces): """VLAN interfaces are not reported by L{get_active_device_info}.""" - mock_interfaces.side_effect = lambda: ( - _interfaces() 
+ ["eth0.1"]) + mock_interfaces.side_effect = lambda: (_interfaces() + ["eth0.1"]) device_info = get_active_device_info() interfaces = [i["interface"] for i in device_info] self.assertNotIn("eth0.1", interfaces) @@ -185,8 +224,7 @@ @patch("landscape.lib.network.netifaces.interfaces") def test_skip_alias(self, mock_interfaces): """Interface aliases are not reported by L{get_active_device_info}.""" - mock_interfaces.side_effect = lambda: ( - _interfaces() + ["eth0:foo"]) + mock_interfaces.side_effect = lambda: (_interfaces() + ["eth0:foo"]) device_info = get_active_device_info() interfaces = [i["interface"] for i in device_info] self.assertNotIn("eth0:foo", interfaces) @@ -207,7 +245,8 @@ def test_skip_iface_with_no_addr(self, mock_interfaces, mock_ifaddresses): mock_interfaces.return_value = _interfaces() + ["test_iface"] mock_ifaddresses.side_effect = lambda iface: ( - _ifaddresses(iface) if iface in _interfaces() else {}) + _ifaddresses(iface) if iface in _interfaces() else {} + ) device_info = get_active_device_info() interfaces = [i["interface"] for i in device_info] self.assertNotIn("test_iface", interfaces) @@ -217,7 +256,8 @@ def test_skip_iface_with_no_ip(self, mock_interfaces, mock_ifaddresses): mock_interfaces.return_value = _interfaces() + ["test_iface"] mock_ifaddresses.side_effect = lambda iface: ( - _ifaddresses(iface) if iface in _interfaces() else {AF_UNIX: []}) + _ifaddresses(iface) if iface in _interfaces() else {AF_UNIX: []} + ) device_info = get_active_device_info() interfaces = [i["interface"] for i in device_info] self.assertNotIn("test_iface", interfaces) @@ -227,14 +267,19 @@ @patch("landscape.lib.network.netifaces.ifaddresses") @patch("landscape.lib.network.netifaces.interfaces") def test_skip_iface_down( - self, mock_interfaces, mock_ifaddresses, mock_get_flags, - mock_get_network_interface_speed): + self, + mock_interfaces, + mock_ifaddresses, + mock_get_flags, + mock_get_network_interface_speed, + ): mock_get_network_interface_speed.return_value = (100, True) mock_get_flags.return_value = 0 mock_interfaces.return_value = ["test_iface"] mock_ifaddresses.return_value = { AF_LINK: [{"addr": "aa:bb:cc:dd:ee:f0"}], - AF_INET: [{"addr": "192.168.1.50", "netmask": "255.255.255.0"}]} + AF_INET: [{"addr": "192.168.1.50", "netmask": "255.255.255.0"}], + } device_info = get_active_device_info() interfaces = [i["interface"] for i in device_info] self.assertNotIn("test_iface", interfaces) @@ -244,8 +289,12 @@ @patch("landscape.lib.network.netifaces.ifaddresses") @patch("landscape.lib.network.netifaces.interfaces") def test_no_macaddr_no_netmask_no_broadcast( - self, mock_interfaces, mock_ifaddresses, mock_get_flags, - mock_get_network_interface_speed): + self, + mock_interfaces, + mock_ifaddresses, + mock_get_flags, + mock_get_network_interface_speed, + ): mock_get_network_interface_speed.return_value = (100, True) mock_get_flags.return_value = 4163 mock_interfaces.return_value = ["test_iface"] @@ -253,15 +302,19 @@ device_info = get_active_device_info(extended=False) self.assertEqual( device_info, - [{ - 'interface': 'test_iface', - 'ip_address': '192.168.0.50', - 'broadcast_address': '0.0.0.0', - 'mac_address': '', - 'netmask': '', - 'flags': 4163, - 'speed': 100, - 'duplex': True}]) + [ + { + "interface": "test_iface", + "ip_address": "192.168.0.50", + "broadcast_address": "0.0.0.0", + "mac_address": "", + "netmask": "", + "flags": 4163, + "speed": 100, + "duplex": True, + }, + ], + ) def test_get_network_traffic(self): """ @@ -271,7 +324,7 @@ m = 
mock_open(read_data=test_proc_net_dev_output) # Trusty's version of `mock.mock_open` does not support `readlines()`. m().readlines = test_proc_net_dev_output.splitlines - with patch('landscape.lib.network.open', m, create=True): + with patch("landscape.lib.network.open", m, create=True): traffic = get_network_traffic() m.assert_called_with("/proc/net/dev", "r") self.assertEqual(traffic, test_proc_net_dev_parsed) @@ -281,28 +334,34 @@ @patch("landscape.lib.network.netifaces.ifaddresses") @patch("landscape.lib.network.netifaces.interfaces") def test_ipv6_skip_link_local( - self, mock_interfaces, mock_ifaddresses, mock_get_flags, - mock_get_network_interface_speed): + self, + mock_interfaces, + mock_ifaddresses, + mock_get_flags, + mock_get_network_interface_speed, + ): mock_get_network_interface_speed.return_value = (100, True) mock_get_flags.return_value = 4163 mock_interfaces.return_value = ["test_iface"] mock_ifaddresses.return_value = { - AF_LINK: [ - {"addr": "aa:bb:cc:dd:ee:f1"}], - AF_INET6: [ - {"addr": "fe80::1"}, - {"addr": "2001::1"}]} + AF_LINK: [{"addr": "aa:bb:cc:dd:ee:f1"}], + AF_INET6: [{"addr": "fe80::1"}, {"addr": "2001::1"}], + } device_info = get_active_device_info(extended=True) self.assertEqual( device_info, - [{ - 'interface': 'test_iface', - 'flags': 4163, - 'speed': 100, - 'duplex': True, - 'ip_addresses': {AF_INET6: [{"addr": "2001::1"}]}}]) + [ + { + "interface": "test_iface", + "flags": 4163, + "speed": 100, + "duplex": True, + "ip_addresses": {AF_INET6: [{"addr": "2001::1"}]}, + }, + ], + ) def test_is_up(self): self.assertTrue(is_up(1)) @@ -317,10 +376,10 @@ return interface.startswith("tap") with patch.multiple( - "landscape.lib.network", - get_flags=DEFAULT, - get_network_interface_speed=DEFAULT, - netifaces=DEFAULT, + "landscape.lib.network", + get_flags=DEFAULT, + get_network_interface_speed=DEFAULT, + netifaces=DEFAULT, ) as mocks: mocks["netifaces"].interfaces.return_value = [ "tap0123", @@ -330,8 +389,10 @@ ] mocks["get_flags"].return_value = 4163 mocks["get_network_interface_speed"].return_value = (100, True) - device_info = get_filtered_if_info(filters=(filter_tap,), - extended=True) + device_info = get_filtered_if_info( + filters=(filter_tap,), + extended=True, + ) self.assertEqual(len(device_info), 2) self.assertTrue(all("tap" not in i["interface"] for i in device_info)) @@ -341,10 +402,10 @@ Tests that tap network interfaces are filtered out. 
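        The mechanism, sketched from the filter_tap predicate used in
        the previous test (the exact set of default filters is an
        assumption):

            def filter_tap(interface):
                return interface.startswith("tap")

            # an interface is dropped when any filter returns True for it
            device_info = get_filtered_if_info(
                filters=(filter_tap,),
                extended=True,
            )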
""" with patch.multiple( - "landscape.lib.network", - get_flags=DEFAULT, - get_network_interface_speed=DEFAULT, - netifaces=DEFAULT, + "landscape.lib.network", + get_flags=DEFAULT, + get_network_interface_speed=DEFAULT, + netifaces=DEFAULT, ) as mocks: mocks["netifaces"].interfaces.return_value = [ "tap0123", @@ -372,42 +433,46 @@ """ test_proc_net_dev_parsed = { - "lo": {"recv_bytes": 3272627934, - "recv_packets": 3321049, - "recv_errs": 0, - "recv_drop": 0, - "recv_fifo": 0, - "recv_frame": 0, - "recv_compressed": 0, - "recv_multicast": 0, - "send_bytes": 3272627934, - "send_packets": 3321049, - "send_errs": 0, - "send_drop": 0, - "send_fifo": 0, - "send_colls": 0, - "send_carrier": 0, - "send_compressed": 0}, - "eth0": {"recv_bytes": 6063748, - "recv_packets": 12539, - "recv_errs": 0, - "recv_drop": 0, - "recv_fifo": 0, - "recv_frame": 0, - "recv_compressed": 0, - "recv_multicast": 62, - "send_bytes": 2279693, - "send_packets": 12579, - "send_errs": 0, - "send_drop": 0, - "send_fifo": 0, - "send_colls": 19, - "send_carrier": 0, - "send_compressed": 0}} + "lo": { + "recv_bytes": 3272627934, + "recv_packets": 3321049, + "recv_errs": 0, + "recv_drop": 0, + "recv_fifo": 0, + "recv_frame": 0, + "recv_compressed": 0, + "recv_multicast": 0, + "send_bytes": 3272627934, + "send_packets": 3321049, + "send_errs": 0, + "send_drop": 0, + "send_fifo": 0, + "send_colls": 0, + "send_carrier": 0, + "send_compressed": 0, + }, + "eth0": { + "recv_bytes": 6063748, + "recv_packets": 12539, + "recv_errs": 0, + "recv_drop": 0, + "recv_fifo": 0, + "recv_frame": 0, + "recv_compressed": 0, + "recv_multicast": 62, + "send_bytes": 2279693, + "send_packets": 12579, + "send_errs": 0, + "send_drop": 0, + "send_fifo": 0, + "send_colls": 19, + "send_carrier": 0, + "send_compressed": 0, + }, +} class FQDNTest(BaseTestCase): - def test_default_fqdn(self): """ C{get_fqdn} returns the output of C{socket.getfqdn} if it returns @@ -428,7 +493,6 @@ class NetworkInterfaceSpeedTest(BaseTestCase): - @patch("struct.unpack") @patch("fcntl.ioctl") def test_get_network_interface_speed(self, mock_ioctl, mock_unpack): @@ -436,7 +500,10 @@ The link speed is reported as unpacked from the ioctl() call. """ sock = socket.socket( - socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) + socket.AF_INET, + socket.SOCK_DGRAM, + socket.IPPROTO_IP, + ) # ioctl always succeeds mock_unpack.return_value = (100, False) @@ -455,7 +522,10 @@ The link speed for an unplugged interface is reported as 0. """ sock = socket.socket( - socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) + socket.AF_INET, + socket.SOCK_DGRAM, + socket.IPPROTO_IP, + ) # ioctl always succeeds mock_unpack.return_value = (65535, False) @@ -476,7 +546,10 @@ If such an error is rasied, report the speed as -1. """ sock = socket.socket( - socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) + socket.AF_INET, + socket.SOCK_DGRAM, + socket.IPPROTO_IP, + ) theerror = IOError() theerror.errno = 95 theerror.message = "Operation not supported" @@ -499,7 +572,10 @@ not report the network speed. """ sock = socket.socket( - socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) + socket.AF_INET, + socket.SOCK_DGRAM, + socket.IPPROTO_IP, + ) theerror = IOError() theerror.errno = 1 theerror.message = "Operation not permitted" @@ -521,7 +597,10 @@ exception should be raised. 
""" sock = socket.socket( - socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) + socket.AF_INET, + socket.SOCK_DGRAM, + socket.IPPROTO_IP, + ) theerror = IOError() theerror.errno = 999 theerror.message = "Whatever" diff -Nru landscape-client-23.02/landscape/lib/tests/test_persist.py landscape-client-23.08/landscape/lib/tests/test_persist.py --- landscape-client-23.02/landscape/lib/tests/test_persist.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_persist.py 2023-08-17 21:19:32.000000000 +0000 @@ -3,9 +3,13 @@ import unittest from landscape.lib import testing -from landscape.lib.persist import ( - path_string_to_tuple, path_tuple_to_string, Persist, RootedPersist, - PickleBackend, PersistError, PersistReadOnlyError) +from landscape.lib.persist import path_string_to_tuple +from landscape.lib.persist import path_tuple_to_string +from landscape.lib.persist import Persist +from landscape.lib.persist import PersistError +from landscape.lib.persist import PersistReadOnlyError +from landscape.lib.persist import PickleBackend +from landscape.lib.persist import RootedPersist class PersistHelpersTest(unittest.TestCase): @@ -19,7 +23,7 @@ ("ab.cd[1]", ("ab", "cd", 1)), ("ab[0].cd[1]", ("ab", 0, "cd", 1)), ("ab.cd.de[2]", ("ab", "cd", "de", 2)), - ] + ] def test_path_string_to_tuple(self): for path_string, path_tuple in self.paths: @@ -49,67 +53,67 @@ ("qr", {"s": {"t": "u"}}), ("v", [0, {}, 2]), ("v[1].v", "woot"), - ] + ] set_result = { - "ab": 2, - "cd": { - "ef": 3.4, - "gh": "5", - "ij": { - "kl": (1, 2.3, "4", [5], (6,)), - "mn": [1, 2.3, "4", [5], (6,)], - "op": [0, 1, 2] - }, - }, - "qr": {"s": {"t": "u"}}, - "v": [0, {"v": "woot"}, 2], - } + "ab": 2, + "cd": { + "ef": 3.4, + "gh": "5", + "ij": { + "kl": (1, 2.3, "4", [5], (6,)), + "mn": [1, 2.3, "4", [5], (6,)], + "op": [0, 1, 2], + }, + }, + "qr": {"s": {"t": "u"}}, + "v": [0, {"v": "woot"}, 2], + } get_items = [ - ("ab", 2), - ("cd.ef", 3.4), - ("cd.gh", "5"), - ("cd.ij.kl", (1, 2.3, "4", [5], (6,))), - ("cd.ij.kl[3]", [5]), - ("cd.ij.kl[3][0]", 5), - ("cd.ij.mn", [1, 2.3, "4", [5], (6,)]), - ("cd.ij.mn.4", "4"), - ("cd.ij.mn.5", None), - ("cd.ij.op", [0, 1, 2]), - ("cd.ij.op[0]", 0), - ("cd.ij.op[1]", 1), - ("cd.ij.op[2]", 2), - ("cd.ij.op[3]", None), - ("qr", {"s": {"t": "u"}}), - ("qr.s", {"t": "u"}), - ("qr.s.t", "u"), - ("x", None), - ("x.y.z", None), - ] + ("ab", 2), + ("cd.ef", 3.4), + ("cd.gh", "5"), + ("cd.ij.kl", (1, 2.3, "4", [5], (6,))), + ("cd.ij.kl[3]", [5]), + ("cd.ij.kl[3][0]", 5), + ("cd.ij.mn", [1, 2.3, "4", [5], (6,)]), + ("cd.ij.mn.4", "4"), + ("cd.ij.mn.5", None), + ("cd.ij.op", [0, 1, 2]), + ("cd.ij.op[0]", 0), + ("cd.ij.op[1]", 1), + ("cd.ij.op[2]", 2), + ("cd.ij.op[3]", None), + ("qr", {"s": {"t": "u"}}), + ("qr.s", {"t": "u"}), + ("qr.s.t", "u"), + ("x", None), + ("x.y.z", None), + ] add_items = [ - ("ab", 1), - ("ab", 2.3), - ("ab", "4"), - ("ab", [5]), - ("ab", (6,)), - ("ab", {}), - ("ab[5].cd", "foo"), - ("ab[5].cd", "bar"), - ] + ("ab", 1), + ("ab", 2.3), + ("ab", "4"), + ("ab", [5]), + ("ab", (6,)), + ("ab", {}), + ("ab[5].cd", "foo"), + ("ab[5].cd", "bar"), + ] add_result = { - "ab": [1, 2.3, "4", [5], (6,), {"cd": ["foo", "bar"]}], - } + "ab": [1, 2.3, "4", [5], (6,), {"cd": ["foo", "bar"]}], + } def setUp(self): - super(BasePersistTest, self).setUp() + super().setUp() self.persist = self.build_persist() def tearDown(self): del self.persist - super(BasePersistTest, self).tearDown() + super().tearDown() def build_persist(self, *args, **kwargs): return 
Persist(*args, **kwargs) @@ -117,31 +121,39 @@ def format(self, result, expected): repr_result = pprint.pformat(result) repr_expected = pprint.pformat(expected) - return "\nResult:\n%s\nExpected:\n%s\n" % (repr_result, repr_expected) + return f"\nResult:\n{repr_result}\nExpected:\n{repr_expected}\n" class GeneralPersistTest(BasePersistTest): - def test_set(self): for path, value in self.set_items: self.persist.set(path, value) result = self.persist.get((), hard=True) - self.assertEqual(result, self.set_result, - self.format(result, self.set_result)) + self.assertEqual( + result, + self.set_result, + self.format(result, self.set_result), + ) def test_set_tuple_paths(self): for path, value in self.set_items: self.persist.set(path_string_to_tuple(path), value) result = self.persist.get((), hard=True) - self.assertEqual(result, self.set_result, - self.format(result, self.set_result)) + self.assertEqual( + result, + self.set_result, + self.format(result, self.set_result), + ) def test_set_from_result(self): for path in self.set_result: self.persist.set(path, self.set_result[path]) result = self.persist.get((), hard=True) - self.assertEqual(result, self.set_result, - self.format(result, self.set_result)) + self.assertEqual( + result, + self.set_result, + self.format(result, self.set_result), + ) def test_get(self): for path in self.set_result: @@ -159,8 +171,11 @@ for path, value in self.add_items: self.persist.add(path, value) result = self.persist.get((), hard=True) - self.assertEqual(result, self.add_result, - self.format(result, self.add_result)) + self.assertEqual( + result, + self.add_result, + self.format(result, self.add_result), + ) def test_add_unique(self): self.persist.add("a", "b") @@ -175,11 +190,11 @@ def test_keys(self): self.persist.set("a", {"b": 1, "c": {"d": 2}, "e": list("foo")}) keys = self.persist.keys - self.assertEqual(set(keys((), hard=True)), set(["a"])) - self.assertEqual(set(keys("a")), set(["b", "c", "e"])) - self.assertEqual(set(keys("a.d")), set([])) - self.assertEqual(set(keys("a.e")), set([0, 1, 2])) - self.assertEqual(set(keys("a.f")), set([])) + self.assertEqual(set(keys((), hard=True)), {"a"}) + self.assertEqual(set(keys("a")), {"b", "c", "e"}) + self.assertEqual(set(keys("a.d")), set()) + self.assertEqual(set(keys("a.e")), {0, 1, 2}) + self.assertEqual(set(keys("a.f")), set()) self.assertRaises(PersistError, keys, "a.b") def test_has(self): @@ -277,8 +292,7 @@ class SaveLoadPersistTest(testing.FSTestCase, BasePersistTest): - - def makePersistFile(self, *args, **kwargs): + def makePersistFile(self, *args, **kwargs): # noqa: N802 return self.makeFile(*args, backupsuffix=".old", **kwargs) def test_readonly(self): @@ -335,8 +349,11 @@ persist.load(filename) result = persist.get((), hard=True) - self.assertEqual(result, self.set_result, - self.format(result, self.set_result)) + self.assertEqual( + result, + self.set_result, + self.format(result, self.set_result), + ) def test_save_on_unexistent_dir(self): dirname = self.makePersistFile() @@ -443,20 +460,23 @@ class PicklePersistTest(GeneralPersistTest, SaveLoadPersistTest): - def build_persist(self, *args, **kwargs): return Persist(PickleBackend(), *args, **kwargs) class RootedPersistTest(GeneralPersistTest): - def build_persist(self, *args, **kwargs): return RootedPersist(Persist(), "root.path", *args, **kwargs) def test_readonly(self): self.assertFalse(self.persist.readonly) - self.assertRaises(AttributeError, - setattr, self.persist, "readonly", True) + self.assertRaises( + AttributeError, + setattr, + 
self.persist, + "readonly", + True, + ) self.persist.parent.readonly = True self.assertTrue(self.persist.readonly) diff -Nru landscape-client-23.02/landscape/lib/tests/test_plugin.py landscape-client-23.08/landscape/lib/tests/test_plugin.py --- landscape-client-23.02/landscape/lib/tests/test_plugin.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_plugin.py 2023-08-17 21:19:32.000000000 +0000 @@ -6,7 +6,7 @@ from landscape.lib.plugin import PluginRegistry -class SamplePlugin(object): +class SamplePlugin: plugin_name = "sample" def __init__(self): @@ -20,7 +20,7 @@ """A plugin which records exchange notification events.""" def __init__(self): - super(ExchangePlugin, self).__init__() + super().__init__() self.exchanged = 0 self.waiter = None @@ -34,11 +34,13 @@ self.waiter.callback(None) -class PluginTest(testing.FSTestCase, testing.TwistedTestCase, - unittest.TestCase): - +class PluginTest( + testing.FSTestCase, + testing.TwistedTestCase, + unittest.TestCase, +): def setUp(self): - super(PluginTest, self).setUp() + super().setUp() self.registry = PluginRegistry() def test_register_plugin(self): diff -Nru landscape-client-23.02/landscape/lib/tests/test_process.py landscape-client-23.08/landscape/lib/tests/test_process.py --- landscape-client-23.02/landscape/lib/tests/test_process.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_process.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,16 +1,16 @@ -import mock import os import unittest +from unittest import mock from landscape.lib import testing -from landscape.lib.process import calculate_pcpu, ProcessInformation from landscape.lib.fs import create_text_file +from landscape.lib.process import calculate_pcpu +from landscape.lib.process import ProcessInformation class ProcessInfoTest(testing.FSTestCase, unittest.TestCase): - def setUp(self): - super(ProcessInfoTest, self).setUp() + super().setUp() self.proc_dir = self.makeDir() def _add_process_info(self, process_id, state="R (running)"): @@ -26,13 +26,16 @@ cmd_line = "/usr/bin/foo" create_text_file(os.path.join(process_dir, "cmdline"), cmd_line) - status = "\n".join([ - "Name: foo", - "State: %s" % state, - "Uid: 1000", - "Gid: 2000", - "VmSize: 3000", - "Ignored: value"]) + status = "\n".join( + [ + "Name: foo", + f"State: {state}", + "Uid: 1000", + "Gid: 2000", + "VmSize: 3000", + "Ignored: value", + ], + ) create_text_file(os.path.join(process_dir, "status"), status) stat_array = [str(index) for index in range(44)] @@ -42,16 +45,19 @@ @mock.patch("landscape.lib.process.detect_jiffies", return_value=1) @mock.patch("os.listdir") @mock.patch("landscape.lib.sysstats.get_uptime") - def test_missing_process_race(self, get_uptime_mock, list_dir_mock, - jiffies_mock): + def test_missing_process_race( + self, + get_uptime_mock, + list_dir_mock, + jiffies_mock, + ): """ We use os.listdir("/proc") to get the list of active processes, if a process ends before we attempt to read the process' information, then this should not trigger an error. 
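        Schematically, the tolerated race (an editor's illustration,
        not the exact reader loop):

            for pid in os.listdir("/proc"):
                try:
                    with open(f"/proc/{pid}/status") as status:
                        status.read()  # Name/State/Uid/... parsed here
                except OSError:
                    continue  # the process exited in between; skip it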
""" - class FakeFile(object): - + class FakeFile: def __init__(self, response=""): self._response = response self.closed = False @@ -61,7 +67,7 @@ def __iter__(self): if self._response is None: - raise IOError("Fake file error") + raise OSError("Fake file error") else: yield self._response @@ -73,15 +79,18 @@ fakefile1 = FakeFile("test-binary") fakefile2 = FakeFile(None) with mock.patch( - "landscape.lib.process.open", mock.mock_open(), create=True, - ) as open_mock: + "landscape.lib.process.open", + mock.mock_open(), + create=True, + ) as open_mock: # This means "return fakefile1, then fakefile2" open_mock.side_effect = [fakefile1, fakefile2] process_info = ProcessInformation("/proc") processes = list(process_info.get_all_process_info()) calls = [ mock.call("/proc/12345/cmdline", "r"), - mock.call("/proc/12345/status", "r")] + mock.call("/proc/12345/status", "r"), + ] open_mock.assert_has_calls(calls) self.assertEqual(processes, []) list_dir_mock.assert_called_with("/proc") @@ -146,7 +155,9 @@ def test_calculate_pcpu_real_data(self): self.assertEqual( - calculate_pcpu(51286, 5000, 19000.07, 9281.0, 100), 3.0) + calculate_pcpu(51286, 5000, 19000.07, 9281.0, 100), + 3.0, + ) def test_calculate_pcpu(self): """ @@ -155,8 +166,7 @@ This should be cpu utilisation of 20% """ - self.assertEqual(calculate_pcpu(8000, 2000, 1000, 50000, 100), - 20.0) + self.assertEqual(calculate_pcpu(8000, 2000, 1000, 50000, 100), 20.0) def test_calculate_pcpu_capped(self): """ @@ -166,8 +176,7 @@ This should be cpu utilisation of 200% but capped at 99% CPU utilisation. """ - self.assertEqual(calculate_pcpu(98000, 2000, 1000, 50000, 100), - 99.0) + self.assertEqual(calculate_pcpu(98000, 2000, 1000, 50000, 100), 99.0) def test_calculate_pcpu_floored(self): """ diff -Nru landscape-client-23.02/landscape/lib/tests/test_reactor.py landscape-client-23.08/landscape/lib/tests/test_reactor.py --- landscape-client-23.02/landscape/lib/tests/test_reactor.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_reactor.py 2023-08-17 21:19:32.000000000 +0000 @@ -8,8 +8,7 @@ from landscape.lib.testing import FakeReactor -class ReactorTestMixin(object): - +class ReactorTestMixin: def test_call_later(self): reactor = self.get_reactor() called = [] @@ -17,6 +16,7 @@ def dummy(): called.append("Hello!") reactor.stop() + reactor.call_later(0, dummy) reactor.run() self.assertEqual(called, ["Hello!"]) @@ -28,6 +28,7 @@ def dummy(a, b=3): called.append((a, b)) reactor.stop() + reactor.call_later(0, dummy, "a", b="b") reactor.run() self.assertEqual(called, [("a", "b")]) @@ -39,6 +40,7 @@ def append(): called.append("Hey!") return True + reactor.call_later(0, append) reactor.call_later(0.3, reactor.stop) reactor.run() @@ -77,6 +79,7 @@ def cancel_call(): reactor.cancel_call(id) called.append("hi") + id = reactor.call_every(0, cancel_call) reactor.call_later(0.1, reactor.stop) reactor.run() @@ -113,6 +116,7 @@ def handle_foobar(): called.append(True) + reactor.call_on("foobar", handle_foobar) reactor.fire("foobar") self.assertEqual(called, [True]) @@ -127,6 +131,7 @@ def handle_foobar(): called.append(True) + reactor.call_on("foobar", handle_foobar) reactor.fire("foobar") reactor.fire("foobar") @@ -138,6 +143,7 @@ def handle_foobar(a, b=3): called.append((a, b)) + reactor.call_on("foobar", handle_foobar) reactor.fire("foobar", "a", b=6) self.assertEqual(called, [("a", 6)]) @@ -213,8 +219,10 @@ self.assertTrue(1 in called) self.assertTrue(3 in called) self.assertTrue("handle_two" in self.logfile.getvalue()) 
- self.assertTrue("ZeroDivisionError" in self.logfile.getvalue(), - self.logfile.getvalue()) + self.assertTrue( + "ZeroDivisionError" in self.logfile.getvalue(), + self.logfile.getvalue(), + ) def test_weird_event_type(self): # This can be useful for "namespaced" event types @@ -344,8 +352,10 @@ reactor.run() self.assertEqual(called, ["f"]) - self.assertTrue("ZeroDivisionError" in self.logfile.getvalue(), - self.logfile.getvalue()) + self.assertTrue( + "ZeroDivisionError" in self.logfile.getvalue(), + self.logfile.getvalue(), + ) def test_call_in_main(self): reactor = self.get_reactor() @@ -395,9 +405,11 @@ self.assertEqual([True], calls) -class FakeReactorTest(testing.HelperTestCase, ReactorTestMixin, - unittest.TestCase): - +class FakeReactorTest( + testing.HelperTestCase, + ReactorTestMixin, + unittest.TestCase, +): def get_reactor(self): return FakeReactor() @@ -429,9 +441,11 @@ self.assertEqual(reactor.time(), 13.5) -class EventHandlingReactorTest(testing.HelperTestCase, ReactorTestMixin, - unittest.TestCase): - +class EventHandlingReactorTest( + testing.HelperTestCase, + ReactorTestMixin, + unittest.TestCase, +): def get_reactor(self): reactor = EventHandlingReactor() # It's not possible to stop the reactor in a Trial test, calling diff -Nru landscape-client-23.02/landscape/lib/tests/test_schema.py landscape-client-23.08/landscape/lib/tests/test_schema.py --- landscape-client-23.02/landscape/lib/tests/test_schema.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_schema.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,23 +1,31 @@ import unittest -from landscape.lib.schema import ( - InvalidError, Constant, Bool, Int, Float, Bytes, Unicode, List, KeyDict, - Dict, Tuple, Any) - from twisted.python.compat import long +from landscape.lib.schema import Any +from landscape.lib.schema import Bool +from landscape.lib.schema import Bytes +from landscape.lib.schema import Constant +from landscape.lib.schema import Dict +from landscape.lib.schema import Float +from landscape.lib.schema import Int +from landscape.lib.schema import InvalidError +from landscape.lib.schema import KeyDict +from landscape.lib.schema import List +from landscape.lib.schema import Tuple +from landscape.lib.schema import Unicode + -class DummySchema(object): +class DummySchema: def coerce(self, value): return "hello!" 
class BasicTypesTest(unittest.TestCase): - def test_any(self): schema = Any(Constant(None), Unicode()) self.assertEqual(schema.coerce(None), None) - self.assertEqual(schema.coerce(u"foo"), u"foo") + self.assertEqual(schema.coerce("foo"), "foo") def test_any_bad(self): schema = Any(Constant(None), Unicode()) @@ -70,13 +78,13 @@ self.assertEqual(Bytes().coerce(b"foo"), b"foo") def test_string_unicode(self): - self.assertEqual(Bytes().coerce(u"foo"), b"foo") + self.assertEqual(Bytes().coerce("foo"), b"foo") def test_string_bad_anything(self): self.assertRaises(InvalidError, Bytes().coerce, object()) def test_unicode(self): - self.assertEqual(Unicode().coerce(u"foo"), u"foo") + self.assertEqual(Unicode().coerce("foo"), "foo") def test_unicode_bad_value(self): """Invalid values raise an errors.""" @@ -84,16 +92,17 @@ def test_unicode_with_str(self): """Unicode accept byte strings and return a unicode.""" - self.assertEqual(Unicode().coerce(b"foo"), u"foo") + self.assertEqual(Unicode().coerce(b"foo"), "foo") def test_unicode_decodes(self): """Unicode should decode plain strings.""" - a = u"\N{HIRAGANA LETTER A}" + a = "\N{HIRAGANA LETTER A}" self.assertEqual(Unicode().coerce(a.encode("utf-8")), a) - letter = u"\N{LATIN SMALL LETTER A WITH GRAVE}" + letter = "\N{LATIN SMALL LETTER A WITH GRAVE}" self.assertEqual( Unicode(encoding="latin-1").coerce(letter.encode("latin-1")), - letter) + letter, + ) def test_unicode_or_str_bad_encoding(self): """Decoding errors should be converted to InvalidErrors.""" @@ -112,7 +121,7 @@ self.assertRaises(InvalidError, List(Int()).coerce, ["hello"]) def test_list_multiple_items(self): - a = u"\N{HIRAGANA LETTER A}" + a = "\N{HIRAGANA LETTER A}" schema = List(Unicode()) self.assertEqual(schema.coerce([a, a.encode("utf-8")]), [a, a]) @@ -120,8 +129,10 @@ self.assertEqual(Tuple(Int()).coerce((1,)), (1,)) def test_tuple_coerces(self): - self.assertEqual(Tuple(Int(), DummySchema()).coerce((23, object())), - (23, "hello!")) + self.assertEqual( + Tuple(Int(), DummySchema()).coerce((23, object())), + (23, "hello!"), + ) def test_tuple_bad(self): self.assertRaises(InvalidError, Tuple().coerce, object()) @@ -136,28 +147,39 @@ self.assertRaises(InvalidError, Tuple(Int()).coerce, (1, 2)) def test_key_dict(self): - self.assertEqual(KeyDict({"foo": Int()}).coerce({"foo": 1}), - {"foo": 1}) + self.assertEqual( + KeyDict({"foo": Int()}).coerce({"foo": 1}), + {"foo": 1}, + ) def test_key_dict_coerces(self): - self.assertEqual(KeyDict({"foo": DummySchema()}).coerce({"foo": 3}), - {"foo": "hello!"}) + self.assertEqual( + KeyDict({"foo": DummySchema()}).coerce({"foo": 3}), + {"foo": "hello!"}, + ) def test_key_dict_bad_inner_schema(self): - self.assertRaises(InvalidError, KeyDict({"foo": Int()}).coerce, - {"foo": "hello"}) + self.assertRaises( + InvalidError, + KeyDict({"foo": Int()}).coerce, + {"foo": "hello"}, + ) def test_key_dict_unknown_key(self): self.assertRaises(InvalidError, KeyDict({}).coerce, {"foo": 1}) + def test_key_dict_unknown_key_not_strict(self): + self.assertEqual( + KeyDict({"foo": Int()}, strict=False).coerce({"foo": 1, "bar": 2}), + {"foo": 1}) + def test_key_dict_bad(self): self.assertRaises(InvalidError, KeyDict({}).coerce, object()) def test_key_dict_multiple_items(self): schema = KeyDict({"one": Int(), "two": List(Float())}) input = {"one": 32, "two": [1.5, 2.3]} - self.assertEqual(schema.coerce(input), - {"one": 32, "two": [1.5, 2.3]}) + self.assertEqual(schema.coerce(input), {"one": 32, "two": [1.5, 2.3]}) def test_key_dict_arbitrary_keys(self): """ @@ 
-176,25 +198,26 @@ self.assertRaises(InvalidError, schema.coerce, {}) def test_key_dict_optional_keys(self): - """KeyDict allows certain keys to be optional. - """ + """KeyDict allows certain keys to be optional.""" schema = KeyDict({"foo": Int(), "bar": Int()}, optional=["bar"]) self.assertEqual(schema.coerce({"foo": 32}), {"foo": 32}) def test_pass_optional_key(self): - """Regression test. It should be possible to pass an optional key. - """ + """Regression test. It should be possible to pass an optional key.""" schema = KeyDict({"foo": Int()}, optional=["foo"]) self.assertEqual(schema.coerce({"foo": 32}), {"foo": 32}) def test_dict(self): - self.assertEqual(Dict(Int(), Bytes()).coerce({32: b"hello."}), - {32: b"hello."}) + self.assertEqual( + Dict(Int(), Bytes()).coerce({32: b"hello."}), + {32: b"hello."}, + ) def test_dict_coerces(self): self.assertEqual( Dict(DummySchema(), DummySchema()).coerce({32: object()}), - {"hello!": "hello!"}) + {"hello!": "hello!"}, + ) def test_dict_inner_bad(self): self.assertRaises(InvalidError, Dict(Int(), Int()).coerce, {"32": 32}) diff -Nru landscape-client-23.02/landscape/lib/tests/test_scriptcontent.py landscape-client-23.08/landscape/lib/tests/test_scriptcontent.py --- landscape-client-23.02/landscape/lib/tests/test_scriptcontent.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_scriptcontent.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,17 +1,18 @@ import unittest -from landscape.lib.scriptcontent import (build_script, generate_script_hash) +from landscape.lib.scriptcontent import build_script +from landscape.lib.scriptcontent import generate_script_hash class ScriptContentTest(unittest.TestCase): - def test_concatenate(self): - self.assertEqual(build_script(u"/bin/sh", u"echo 1.0\n"), - "#!/bin/sh\necho 1.0\n") + self.assertEqual( + build_script("/bin/sh", "echo 1.0\n"), + "#!/bin/sh\necho 1.0\n", + ) def test_concatenate_null_strings(self): - self.assertEqual(build_script(None, None), - "#!\n") + self.assertEqual(build_script(None, None), "#!\n") def test_generate_script_hash(self): hash1 = generate_script_hash("#!/bin/sh\necho 1.0\n") diff -Nru landscape-client-23.02/landscape/lib/tests/test_sequenceranges.py landscape-client-23.08/landscape/lib/tests/test_sequenceranges.py --- landscape-client-23.02/landscape/lib/tests/test_sequenceranges.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_sequenceranges.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,15 @@ import unittest -from landscape.lib.sequenceranges import ( - SequenceRanges, remove_from_ranges, add_to_ranges, find_ranges_index, - ranges_to_sequence, sequence_to_ranges, SequenceError) +from landscape.lib.sequenceranges import add_to_ranges +from landscape.lib.sequenceranges import find_ranges_index +from landscape.lib.sequenceranges import ranges_to_sequence +from landscape.lib.sequenceranges import remove_from_ranges +from landscape.lib.sequenceranges import sequence_to_ranges +from landscape.lib.sequenceranges import SequenceError +from landscape.lib.sequenceranges import SequenceRanges class SequenceRangesTest(unittest.TestCase): - def setUp(self): self.ranges = [1, 2, (15, 17), 19, (21, 24), 26, 27] self.sequence = [1, 2, 15, 16, 17, 19, 21, 22, 23, 24, 26, 27] @@ -82,7 +85,6 @@ class SequenceToRangesTest(unittest.TestCase): - def test_empty(self): self.assertEqual(list(sequence_to_ranges([])), []) @@ -97,8 +99,10 @@ def test_many_elements(self): sequence = [1, 2, 15, 16, 17, 19, 21, 22, 23, 24, 26, 
27] - self.assertEqual(list(sequence_to_ranges(sequence)), - [1, 2, (15, 17), 19, (21, 24), 26, 27]) + self.assertEqual( + list(sequence_to_ranges(sequence)), + [1, 2, (15, 17), 19, (21, 24), 26, 27], + ) def test_out_of_order(self): self.assertRaises(SequenceError, next, sequence_to_ranges([2, 1])) @@ -108,7 +112,6 @@ class RangesToSequenceTest(unittest.TestCase): - def test_empty(self): self.assertEqual(list(ranges_to_sequence([])), []) @@ -123,8 +126,10 @@ def test_many_elements(self): ranges = [1, 2, (15, 17), 19, (21, 24), 26, 27] - self.assertEqual(list(ranges_to_sequence(ranges)), - [1, 2, 15, 16, 17, 19, 21, 22, 23, 24, 26, 27]) + self.assertEqual( + list(ranges_to_sequence(ranges)), + [1, 2, 15, 16, 17, 19, 21, 22, 23, 24, 26, 27], + ) def test_invalid_range(self): """ @@ -135,7 +140,6 @@ class FindRangesIndexTest(unittest.TestCase): - def test_empty(self): self.assertEqual(find_ranges_index([], 2), 0) @@ -173,7 +177,6 @@ class AddToRangesTest(unittest.TestCase): - def test_empty(self): ranges = [] add_to_ranges(ranges, 1) @@ -235,7 +238,6 @@ class RemoveFromRangesTest(unittest.TestCase): - def test_empty(self): ranges = [] remove_from_ranges(ranges, 1) @@ -293,11 +295,13 @@ def test_suite(): - return unittest.TestSuite(( - unittest.makeSuite(SequenceToRangesTest), - unittest.makeSuite(RangesToSequenceTest), - unittest.makeSuite(SequenceRangesTest), - unittest.makeSuite(FindRangesIndexTest), - unittest.makeSuite(AddToRangesTest), - unittest.makeSuite(RemoveFromRangesTest), - )) + return unittest.TestSuite( + ( + unittest.makeSuite(SequenceToRangesTest), + unittest.makeSuite(RangesToSequenceTest), + unittest.makeSuite(SequenceRangesTest), + unittest.makeSuite(FindRangesIndexTest), + unittest.makeSuite(AddToRangesTest), + unittest.makeSuite(RemoveFromRangesTest), + ), + ) diff -Nru landscape-client-23.02/landscape/lib/tests/test_sysstats.py landscape-client-23.08/landscape/lib/tests/test_sysstats.py --- landscape-client-23.02/landscape/lib/tests/test_sysstats.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_sysstats.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,16 @@ -from datetime import datetime import os import re import unittest +from datetime import datetime from landscape.lib import testing -from landscape.lib.sysstats import ( - MemoryStats, CommandError, get_logged_in_users, get_uptime, - get_thermal_zones, LoginInfoReader, BootTimes) +from landscape.lib.sysstats import BootTimes +from landscape.lib.sysstats import CommandError +from landscape.lib.sysstats import get_logged_in_users +from landscape.lib.sysstats import get_thermal_zones +from landscape.lib.sysstats import get_uptime +from landscape.lib.sysstats import LoginInfoReader +from landscape.lib.sysstats import MemoryStats from landscape.lib.testing import append_login_data @@ -37,13 +41,15 @@ """ -class BaseTestCase(testing.TwistedTestCase, testing.FSTestCase, - unittest.TestCase): +class BaseTestCase( + testing.TwistedTestCase, + testing.FSTestCase, + unittest.TestCase, +): pass class MemoryStatsTest(BaseTestCase): - def test_get_memory_info(self): filename = self.makeFile(SAMPLE_MEMORY_INFO) memstats = MemoryStats(filename) @@ -53,14 +59,17 @@ self.assertEqual(memstats.total_swap, 1584) self.assertEqual(memstats.free_swap, 1567) self.assertEqual(memstats.used_swap, 17) - self.assertEqual("%.2f" % memstats.free_memory_percentage, "56.42") - self.assertEqual("%.2f" % memstats.free_swap_percentage, "98.93") - self.assertEqual("%.2f" % memstats.used_memory_percentage, 
"43.58") - self.assertEqual("%.2f" % memstats.used_swap_percentage, "1.07") + self.assertEqual(f"{memstats.free_memory_percentage:.2f}", "56.42") + self.assertEqual(f"{memstats.free_swap_percentage:.2f}", "98.93") + self.assertEqual(f"{memstats.used_memory_percentage:.2f}", "43.58") + self.assertEqual(f"{memstats.used_swap_percentage:.2f}", "1.07") def test_get_memory_info_without_swap(self): - sample = re.subn(r"Swap(Free|Total): *\d+ kB", r"Swap\1: 0", - SAMPLE_MEMORY_INFO)[0] + sample = re.subn( + r"Swap(Free|Total): *\d+ kB", + r"Swap\1: 0", + SAMPLE_MEMORY_INFO, + )[0] filename = self.makeFile(sample) memstats = MemoryStats(filename) self.assertEqual(memstats.total_swap, 0) @@ -78,21 +87,20 @@ def fake_who(self, users): dirname = self.makeDir() - os.environ["PATH"] = "%s:%s" % (dirname, os.environ["PATH"]) + os.environ["PATH"] = "{}:{}".format(dirname, os.environ["PATH"]) self.who_path = os.path.join(dirname, "who") who = open(self.who_path, "w") who.write("#!/bin/sh\n") who.write("test x$1 = x-q || echo missing-parameter\n") - who.write("echo %s\n" % users) - who.write("echo '# users=%d'\n" % len(users.split())) + who.write(f"echo {users}\n") + who.write(f"echo '# users={len(users.split()):d}'\n") who.close() os.chmod(self.who_path, 0o770) class LoggedInUsersTest(FakeWhoQTest): - def test_one_user(self): self.fake_who("joe") result = get_logged_in_users() @@ -121,6 +129,7 @@ def assert_failure(failure): failure.trap(CommandError) self.assertEqual(str(failure.value), "ERROR\n") + result.addErrback(assert_failure) return result @@ -131,14 +140,12 @@ def test_valid_uptime_file(self): """Test ensures that we can read a valid /proc/uptime file.""" proc_file = self.makeFile("17608.24 16179.25") - self.assertEqual("%0.2f" % get_uptime(proc_file), - "17608.24") + self.assertEqual(f"{get_uptime(proc_file):0.2f}", "17608.24") class ProcfsThermalZoneTest(BaseTestCase): - def setUp(self): - super(ProcfsThermalZoneTest, self).setUp() + super().setUp() self.thermal_zone_path = self.makeDir() def get_thermal_zones(self): @@ -154,7 +161,6 @@ class GetProcfsThermalZonesTest(ProcfsThermalZoneTest): - def test_non_existent_thermal_zone_directory(self): thermal_zones = list(get_thermal_zones("/non-existent/thermal_zone")) self.assertEqual(thermal_zones, []) @@ -171,8 +177,10 @@ self.assertEqual(thermal_zones[0].temperature, "50 C") self.assertEqual(thermal_zones[0].temperature_value, 50) self.assertEqual(thermal_zones[0].temperature_unit, "C") - self.assertEqual(thermal_zones[0].path, - os.path.join(self.thermal_zone_path, "THM0")) + self.assertEqual( + thermal_zones[0].path, + os.path.join(self.thermal_zone_path, "THM0"), + ) def test_two_thermal_zones(self): self.write_thermal_zone("THM0", "50 C") @@ -204,8 +212,10 @@ def test_temperature_file_with_missing_label(self): self.write_thermal_zone("THM0", "SOMETHINGBAD") - temperature_path = os.path.join(self.thermal_zone_path, - "THM0/temperature") + temperature_path = os.path.join( + self.thermal_zone_path, + "THM0/temperature", + ) file = open(temperature_path, "w") file.write("bad-label: foo bar\n") file.close() @@ -217,9 +227,8 @@ class ThermalZoneTest(BaseTestCase): - def setUp(self): - super(ThermalZoneTest, self).setUp() + super().setUp() self.thermal_zone_path = self.makeDir() def get_thermal_zones(self): @@ -235,7 +244,6 @@ class GetSysfsThermalZonesTest(ThermalZoneTest): - def test_non_existent_thermal_zone_directory(self): thermal_zones = list(get_thermal_zones("/non-existent/thermal_zone")) self.assertEqual(thermal_zones, []) @@ -252,8 +260,10 
@@ self.assertEqual(thermal_zones[0].temperature, "50.0 C") self.assertEqual(thermal_zones[0].temperature_value, 50.0) self.assertEqual(thermal_zones[0].temperature_unit, "C") - self.assertEqual(thermal_zones[0].path, - os.path.join(self.thermal_zone_path, "THM0")) + self.assertEqual( + thermal_zones[0].path, + os.path.join(self.thermal_zone_path, "THM0"), + ) def test_two_thermal_zones(self): self.write_thermal_zone("THM0", "50000") @@ -276,8 +286,10 @@ self.assertEqual(thermal_zones[0].temperature, "50.4 C") self.assertEqual(thermal_zones[0].temperature_value, 50.432) self.assertEqual(thermal_zones[0].temperature_unit, "C") - self.assertEqual(thermal_zones[0].path, - os.path.join(self.thermal_zone_path, "THM0")) + self.assertEqual( + thermal_zones[0].path, + os.path.join(self.thermal_zone_path, "THM0"), + ) def test_badly_formatted_temperature(self): self.write_thermal_zone("THM0", "SOMETHING BAD") @@ -289,8 +301,7 @@ def test_read_error(self): self.write_thermal_zone("THM0", "50000") - temperature_path = os.path.join(self.thermal_zone_path, - "THM0/temp") + temperature_path = os.path.join(self.thermal_zone_path, "THM0/temp") os.chmod(temperature_path, 0o200) # --w------- thermal_zones = self.get_thermal_zones() self.assertEqual(len(thermal_zones), 1) @@ -316,16 +327,36 @@ def test_read_login_info(self): """Test ensures the reader can read login info.""" filename = self.makeFile("") - append_login_data(filename, login_type=1, pid=100, tty_device="/dev/", - id="1", username="jkakar", hostname="localhost", - termination_status=0, exit_status=0, session_id=1, - entry_time_seconds=105, entry_time_milliseconds=10, - remote_ip_address=[192, 168, 42, 102]) - append_login_data(filename, login_type=1, pid=101, tty_device="/dev/", - id="1", username="root", hostname="localhost", - termination_status=0, exit_status=0, session_id=2, - entry_time_seconds=235, entry_time_milliseconds=17, - remote_ip_address=[192, 168, 42, 102]) + append_login_data( + filename, + login_type=1, + pid=100, + tty_device="/dev/", + id="1", + username="jkakar", + hostname="localhost", + termination_status=0, + exit_status=0, + session_id=1, + entry_time_seconds=105, + entry_time_milliseconds=10, + remote_ip_address=[192, 168, 42, 102], + ) + append_login_data( + filename, + login_type=1, + pid=101, + tty_device="/dev/", + id="1", + username="root", + hostname="localhost", + termination_status=0, + exit_status=0, + session_id=2, + entry_time_seconds=235, + entry_time_milliseconds=17, + remote_ip_address=[192, 168, 42, 102], + ) file = open(filename, "rb") try: @@ -382,13 +413,16 @@ class BootTimesTest(BaseTestCase): - def test_fallback_to_uptime(self): """ When no data is available in C{/var/log/wtmp} L{BootTimes.get_last_boot_time} falls back to C{/proc/uptime}. 
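        The fallback relationship, schematically (an editor's sketch;
        the module's actual wtmp parsing is elided):

            last_boot = time.time() - get_uptime()  # uptime = seconds
                                                    # since boot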
""" wtmp_filename = self.makeFile("") - append_login_data(wtmp_filename, tty_device="~", username="shutdown", - entry_time_seconds=535) + append_login_data( + wtmp_filename, + tty_device="~", + username="shutdown", + entry_time_seconds=535, + ) self.assertTrue(BootTimes(filename=wtmp_filename).get_last_boot_time()) diff -Nru landscape-client-23.02/landscape/lib/tests/test_tag.py landscape-client-23.08/landscape/lib/tests/test_tag.py --- landscape-client-23.02/landscape/lib/tests/test_tag.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_tag.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,38 +1,42 @@ import unittest -from landscape.lib.tag import is_valid_tag, is_valid_tag_list +from landscape.lib.tag import is_valid_tag +from landscape.lib.tag import is_valid_tag_list class ValidTagTest(unittest.TestCase): - def test_valid_tags(self): """Test valid tags.""" - self.assertTrue(is_valid_tag(u"london")) - self.assertTrue(is_valid_tag(u"server")) - self.assertTrue(is_valid_tag(u"ubuntu-server")) - self.assertTrue(is_valid_tag(u"location-1234")) + self.assertTrue(is_valid_tag("london")) + self.assertTrue(is_valid_tag("server")) + self.assertTrue(is_valid_tag("ubuntu-server")) + self.assertTrue(is_valid_tag("location-1234")) self.assertTrue( - is_valid_tag(u"prova\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}o")) + is_valid_tag("prova\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}o"), + ) def test_invalid_tags(self): """Test invalid tags.""" - self.assertFalse(is_valid_tag(u"!!!")) - self.assertFalse(is_valid_tag(u"location 1234")) - self.assertFalse(is_valid_tag(u"ubuntu server")) + self.assertFalse(is_valid_tag("!!!")) + self.assertFalse(is_valid_tag("location 1234")) + self.assertFalse(is_valid_tag("ubuntu server")) def test_valid_tag_list(self): """Test valid taglist format strings.""" - self.assertTrue(is_valid_tag_list(u"london, server")) - self.assertTrue(is_valid_tag_list(u"ubuntu-server,london")) - self.assertTrue(is_valid_tag_list(u"location-1234, server")) + self.assertTrue(is_valid_tag_list("london, server")) + self.assertTrue(is_valid_tag_list("ubuntu-server,london")) + self.assertTrue(is_valid_tag_list("location-1234, server")) self.assertTrue( is_valid_tag_list( - u"prova\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}o, server")) + "prova\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}o, server", + ), + ) def test_invalid_tag_list(self): """Test invalid taglist format strings.""" - self.assertFalse(is_valid_tag_list(u"ubuntu-server,")) - self.assertFalse(is_valid_tag_list(u"!!!,")) - self.assertFalse(is_valid_tag_list(u"location 1234, server")) - self.assertFalse(is_valid_tag_list( - u"ubuntu, server, ")) + self.assertFalse(is_valid_tag_list("ubuntu-server,")) + self.assertFalse(is_valid_tag_list("!!!,")) + self.assertFalse(is_valid_tag_list("location 1234, server")) + self.assertFalse( + is_valid_tag_list("ubuntu, server, "), + ) diff -Nru landscape-client-23.02/landscape/lib/tests/test_timestamp.py landscape-client-23.08/landscape/lib/tests/test_timestamp.py --- landscape-client-23.02/landscape/lib/tests/test_timestamp.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_timestamp.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,5 @@ -from datetime import datetime import unittest +from datetime import datetime from landscape.lib.timestamp import to_timestamp diff -Nru landscape-client-23.02/landscape/lib/tests/test_twisted_util.py landscape-client-23.08/landscape/lib/tests/test_twisted_util.py --- 
landscape-client-23.02/landscape/lib/tests/test_twisted_util.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_twisted_util.py 2023-08-17 21:19:32.000000000 +0000 @@ -6,11 +6,13 @@ from landscape.lib.twisted_util import spawn_process -class SpawnProcessTest(testing.TwistedTestCase, testing.FSTestCase, - unittest.TestCase): - +class SpawnProcessTest( + testing.TwistedTestCase, + testing.FSTestCase, + unittest.TestCase, +): def setUp(self): - super(SpawnProcessTest, self).setUp() + super().setUp() self.command = self.makeFile("#!/bin/sh\necho -n $@") os.chmod(self.command, 0o755) @@ -34,6 +36,7 @@ """ The process returns the expected standard output. """ + def callback(args): out, err, code = args self.assertEqual(out, b"a b") @@ -77,8 +80,11 @@ out, err, code = args self.assertEqual(expected, lines) - result = spawn_process(self.command, args=(param,), - line_received=line_received) + result = spawn_process( + self.command, + args=(param,), + line_received=line_received, + ) result.addCallback(callback) return result @@ -98,8 +104,11 @@ out, err, code = args self.assertEqual(expected, lines) - result = spawn_process(self.command, args=(param,), - line_received=line_received) + result = spawn_process( + self.command, + args=(param,), + line_received=line_received, + ) result.addCallback(callback) return result @@ -120,8 +129,11 @@ out, err, code = args self.assertEqual(expected, lines) - result = spawn_process(self.command, args=(param,), - line_received=line_received) + result = spawn_process( + self.command, + args=(param,), + line_received=line_received, + ) result.addCallback(callback) return result @@ -145,10 +157,13 @@ C{SignalError}. """ # This script will kill itself. - create_text_file(self.command, """\ + create_text_file( + self.command, + """\ #!/bin/sh kill $$ -""") +""", + ) def callback(args): raise RuntimeError("Errback was not called.") diff -Nru landscape-client-23.02/landscape/lib/tests/test_versioning.py landscape-client-23.08/landscape/lib/tests/test_versioning.py --- landscape-client-23.02/landscape/lib/tests/test_versioning.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_versioning.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,10 +1,10 @@ from unittest import TestCase -from landscape.lib.versioning import is_version_higher, sort_versions +from landscape.lib.versioning import is_version_higher +from landscape.lib.versioning import sort_versions class IsVersionHigherTest(TestCase): - def test_greater(self): """ The C{is_version_higher} function returns C{True} if the first @@ -28,7 +28,6 @@ class SortVersionsTest(TestCase): - def test_sort(self): """ The C{sort_versions} function sorts the given versions from the diff -Nru landscape-client-23.02/landscape/lib/tests/test_vm_info.py landscape-client-23.08/landscape/lib/tests/test_vm_info.py --- landscape-client-23.02/landscape/lib/tests/test_vm_info.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_vm_info.py 2023-08-17 21:19:32.000000000 +0000 @@ -2,7 +2,8 @@ import unittest from landscape.lib import testing -from landscape.lib.vm_info import get_vm_info, get_container_info +from landscape.lib.vm_info import get_container_info +from landscape.lib.vm_info import get_vm_info class BaseTestCase(testing.FSTestCase, unittest.TestCase): @@ -10,15 +11,16 @@ class GetVMInfoTest(BaseTestCase): - def setUp(self): - super(GetVMInfoTest, self).setUp() + super().setUp() self.root_path = self.makeDir() 
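        # Fixture layout, summarized (an editor's note): get_vm_info()
        # is probed against this fake root rather than the real host,
        # so the tests below populate <root>/proc, <root>/sys and
        # <root>/proc/sys with files such as sys/class/dmi/id/sys_vendor
        # and proc/cpuinfo.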
self.proc_path = self.makeDir( - path=os.path.join(self.root_path, "proc")) + path=os.path.join(self.root_path, "proc"), + ) self.sys_path = self.makeDir(path=os.path.join(self.root_path, "sys")) self.proc_sys_path = self.makeDir( - path=os.path.join(self.proc_path, "sys")) + path=os.path.join(self.proc_path, "sys"), + ) def make_dmi_info(self, name, content): """Create /sys/class/dmi/id/ with the specified content.""" @@ -167,7 +169,8 @@ cpuinfo = ( "platform : Some Machine\n" "model : Some CPU (emulated by qemu)\n" - "machine : Some Machine (emulated by qemu)\n") + "machine : Some Machine (emulated by qemu)\n" + ) self.makeFile(path=cpuinfo_path, content=cpuinfo) self.assertEqual(b"kvm", get_vm_info(root_path=self.root_path)) @@ -198,9 +201,8 @@ class GetContainerInfoTest(BaseTestCase): - def setUp(self): - super(GetContainerInfoTest, self).setUp() + super().setUp() self.run_path = self.makeDir() def test_no_container(self): diff -Nru landscape-client-23.02/landscape/lib/tests/test_warning.py landscape-client-23.08/landscape/lib/tests/test_warning.py --- landscape-client-23.02/landscape/lib/tests/test_warning.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/tests/test_warning.py 2023-08-17 21:19:32.000000000 +0000 @@ -7,11 +7,11 @@ class WarningTest(TestCase): def setUp(self): - super(WarningTest, self).setUp() + super().setUp() self.orig_filters = warnings.filters[:] def tearDown(self): - super(WarningTest, self).tearDown() + super().tearDown() warnings.filters[:] = self.orig_filters def test_hide_warnings(self): @@ -24,7 +24,10 @@ self.assertEqual( filters, # The frontmost should "default" (i.e. print) on UserWarnings - [("default", None, UserWarning, None, 0), - # The one just behind that should indicate that we should ignore - # all other warnings. - ("ignore", None, Warning, None, 0)]) + [ + ("default", None, UserWarning, None, 0), + # The one just behind that should indicate that we should + # ignore all other warnings. 
+ ("ignore", None, Warning, None, 0), + ], + ) diff -Nru landscape-client-23.02/landscape/lib/twisted_util.py landscape-client-23.08/landscape/lib/twisted_util.py --- landscape-client-23.02/landscape/lib/twisted_util.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/twisted_util.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,11 +1,14 @@ import io -from twisted.internet.defer import DeferredList, Deferred -from twisted.internet.protocol import ProcessProtocol -from twisted.internet.process import Process, ProcessReader from twisted.internet import reactor +from twisted.internet.defer import Deferred +from twisted.internet.defer import DeferredList +from twisted.internet.process import Process +from twisted.internet.process import ProcessReader +from twisted.internet.protocol import ProcessProtocol +from twisted.python.compat import itervalues +from twisted.python.compat import networkString from twisted.python.failure import Failure -from twisted.python.compat import itervalues, networkString from landscape.lib.encoding import encode_values @@ -15,8 +18,11 @@ def gather_results(deferreds, consume_errors=False): - d = DeferredList(deferreds, fireOnOneErrback=1, - consumeErrors=consume_errors) + d = DeferredList( + deferreds, + fireOnOneErrback=1, + consumeErrors=consume_errors, + ) d.addCallback(lambda r: [x[1] for x in r]) d.addErrback(lambda f: f.value.subFailure) return d @@ -34,12 +40,12 @@ self.line_received = line_received self._partial_line = b"" - def connectionMade(self): + def connectionMade(self): # noqa: N802 if self.stdin is not None: self.transport.write(networkString(self.stdin)) self.transport.closeStdin() - def outReceived(self, data): + def outReceived(self, data): # noqa: N802 self.outBuf.write(data) if self.line_received is None: @@ -55,7 +61,7 @@ for line in lines: self.line_received(line) - def processEnded(self, reason): + def processEnded(self, reason): # noqa: N802 if self._partial_line: self.line_received(self._partial_line) self._partial_line = b"" @@ -70,9 +76,18 @@ self.deferred.callback((out, err, code)) -def spawn_process(executable, args=(), env={}, path=None, uid=None, gid=None, - usePTY=False, wait_pipes=True, line_received=None, - stdin=None): +def spawn_process( + executable, + args=(), + env={}, + path=None, + uid=None, + gid=None, + usePTY=False, + wait_pipes=True, + line_received=None, + stdin=None, +): """ Spawn a process using Twisted reactor. @@ -92,16 +107,26 @@ list_args.extend(args) result = Deferred() - protocol = AllOutputProcessProtocol(result, stdin=stdin, - line_received=line_received) + protocol = AllOutputProcessProtocol( + result, + stdin=stdin, + line_received=line_received, + ) env = encode_values(env) - process = reactor.spawnProcess(protocol, executable, args=list_args, - env=env, path=path, uid=uid, gid=gid, - usePTY=usePTY) + process = reactor.spawnProcess( + protocol, + executable, + args=list_args, + env=env, + path=path, + uid=uid, + gid=gid, + usePTY=usePTY, + ) if not wait_pipes: - def maybeCallProcessEnded(): + def maybeCallProcessEnded(): # noqa: N802 """A less strict version of Process.maybeCallProcessEnded. 
This behaves exactly like the original method, but in case the diff -Nru landscape-client-23.02/landscape/lib/user.py landscape-client-23.08/landscape/lib/user.py --- landscape-client-23.02/landscape/lib/user.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/user.py 2023-08-17 21:19:32.000000000 +0000 @@ -2,7 +2,6 @@ import pwd from landscape.lib.compat import _PY3 - from landscape.lib.encoding import encode_if_needed @@ -32,7 +31,7 @@ # locale.setlocale() to use UTF-8 was not successful. info = pwd.getpwnam(username_str) except (KeyError, UnicodeEncodeError): - raise UnknownUserError(u"Unknown user '%s'" % username) + raise UnknownUserError(f"Unknown user '{username}'") uid = info.pw_uid gid = info.pw_gid path = info.pw_dir diff -Nru landscape-client-23.02/landscape/lib/versioning.py landscape-client-23.08/landscape/lib/versioning.py --- landscape-client-23.02/landscape/lib/versioning.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/versioning.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,4 @@ """Helpers for dealing with software versioning.""" - from distutils.version import StrictVersion @@ -30,6 +29,9 @@ """ strict_versions = sorted( [StrictVersion(version.decode("ascii")) for version in versions], - reverse=True) - return [str(strict_version).encode("ascii") - for strict_version in strict_versions] + reverse=True, + ) + return [ + str(strict_version).encode("ascii") + for strict_version in strict_versions + ] diff -Nru landscape-client-23.02/landscape/lib/vm_info.py landscape-client-23.08/landscape/lib/vm_info.py --- landscape-client-23.02/landscape/lib/vm_info.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/vm_info.py 2023-08-17 21:19:32.000000000 +0000 @@ -3,7 +3,8 @@ """ import os -from landscape.lib.fs import read_binary_file, read_text_file +from landscape.lib.fs import read_binary_file +from landscape.lib.fs import read_text_file DMI_FILES = ("sys_vendor", "chassis_vendor", "bios_vendor", "product_name") @@ -79,7 +80,7 @@ (b"kvm", b"kvm"), (b"vmware", b"vmware"), (b"rhev", b"kvm"), - (b"parallels", b"kvm") + (b"parallels", b"kvm"), ) for name, vm_type in content_vendors_map: if name in vendor: @@ -92,7 +93,7 @@ """Check if the host is virtualized looking at /proc/cpuinfo content.""" try: cpuinfo = read_text_file(os.path.join(root_path, "proc/cpuinfo")) - except (IOError, OSError): + except OSError: return b"" if "qemu" in cpuinfo: diff -Nru landscape-client-23.02/landscape/lib/warning.py landscape-client-23.08/landscape/lib/warning.py --- landscape-client-23.02/landscape/lib/warning.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/lib/warning.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,4 @@ """Warning utilities for Landscape.""" - import warnings diff -Nru landscape-client-23.02/landscape/message_schemas/__init__.py landscape-client-23.08/landscape/message_schemas/__init__.py --- landscape-client-23.02/landscape/message_schemas/__init__.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/message_schemas/__init__.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,4 @@ # Copyright 2017 Canonical Limited. All rights reserved. # flake8: noqa - # This import is here for backward-compatibility. 
from .server_bound import * diff -Nru landscape-client-23.02/landscape/message_schemas/message.py landscape-client-23.08/landscape/message_schemas/message.py --- landscape-client-23.02/landscape/message_schemas/message.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/message_schemas/message.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,8 @@ - -from landscape.lib.schema import KeyDict, Float, Bytes, Constant, Any +from landscape.lib.schema import Any +from landscape.lib.schema import Bytes +from landscape.lib.schema import Constant +from landscape.lib.schema import Float +from landscape.lib.schema import KeyDict class Message(KeyDict): @@ -16,6 +19,7 @@ @param api: The server API version needed to send this message, if C{None} any version is fine. """ + def __init__(self, type, schema, optional=None, api=None): self.type = type self.api = api @@ -26,7 +30,7 @@ optional.extend(["timestamp", "api"]) else: optional = ["timestamp", "api"] - super(Message, self).__init__(schema, optional=optional) + super().__init__(schema, optional=optional) def coerce(self, value): for k in list(value.keys()): @@ -36,4 +40,4 @@ # in a message talks to an older server, that don't understand # the new field yet. value.pop(k) - return super(Message, self).coerce(value) + return super().coerce(value) diff -Nru landscape-client-23.02/landscape/message_schemas/server_bound.py landscape-client-23.08/landscape/message_schemas/server_bound.py --- landscape-client-23.02/landscape/message_schemas/server_bound.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/message_schemas/server_bound.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,26 +1,66 @@ # Copyright 2017 Canonical Limited. All rights reserved. - -from landscape.lib.schema import ( - KeyDict, Dict, List, Tuple, - Bool, Int, Float, Bytes, Unicode, Constant, Any) from .message import Message +from landscape.lib.schema import Any +from landscape.lib.schema import Bool +from landscape.lib.schema import Bytes +from landscape.lib.schema import Constant +from landscape.lib.schema import Dict +from landscape.lib.schema import Float +from landscape.lib.schema import Int +from landscape.lib.schema import KeyDict +from landscape.lib.schema import List +from landscape.lib.schema import Tuple +from landscape.lib.schema import Unicode __all__ = [ - "ACTIVE_PROCESS_INFO", "COMPUTER_UPTIME", "CLIENT_UPTIME", - "OPERATION_RESULT", "COMPUTER_INFO", "DISTRIBUTION_INFO", - "HARDWARE_INVENTORY", "HARDWARE_INFO", "LOAD_AVERAGE", "MEMORY_INFO", - "RESYNCHRONIZE", "MOUNT_ACTIVITY", "MOUNT_INFO", "FREE_SPACE", - "REGISTER", "REGISTER_3_3", - "TEMPERATURE", "PROCESSOR_INFO", "USERS", "PACKAGES", "PACKAGE_LOCKS", - "CHANGE_PACKAGES_RESULT", "UNKNOWN_PACKAGE_HASHES", - "ADD_PACKAGES", "PACKAGE_REPORTER_RESULT", "TEXT_MESSAGE", "TEST", - "CUSTOM_GRAPH", "REBOOT_REQUIRED", "APT_PREFERENCES", - "NETWORK_DEVICE", "NETWORK_ACTIVITY", - "REBOOT_REQUIRED_INFO", "UPDATE_MANAGER_INFO", "CPU_USAGE", - "CEPH_USAGE", "SWIFT_USAGE", "SWIFT_DEVICE_INFO", "KEYSTONE_TOKEN", - "JUJU_UNITS_INFO", "CLOUD_METADATA", "COMPUTER_TAGS", "UBUNTU_PRO_INFO", - ] + "ACTIVE_PROCESS_INFO", + "COMPUTER_UPTIME", + "CLIENT_UPTIME", + "OPERATION_RESULT", + "COMPUTER_INFO", + "DISTRIBUTION_INFO", + "HARDWARE_INVENTORY", + "HARDWARE_INFO", + "LOAD_AVERAGE", + "MEMORY_INFO", + "RESYNCHRONIZE", + "MOUNT_ACTIVITY", + "MOUNT_INFO", + "FREE_SPACE", + "REGISTER", + "REGISTER_3_3", + "TEMPERATURE", + "PROCESSOR_INFO", + "USERS", + "PACKAGES", + "PACKAGE_LOCKS", + 
"CHANGE_PACKAGES_RESULT", + "UNKNOWN_PACKAGE_HASHES", + "ADD_PACKAGES", + "PACKAGE_REPORTER_RESULT", + "TEXT_MESSAGE", + "TEST", + "CUSTOM_GRAPH", + "REBOOT_REQUIRED", + "APT_PREFERENCES", + "NETWORK_DEVICE", + "NETWORK_ACTIVITY", + "REBOOT_REQUIRED_INFO", + "UPDATE_MANAGER_INFO", + "CPU_USAGE", + "CEPH_USAGE", + "SWIFT_USAGE", + "SWIFT_DEVICE_INFO", + "KEYSTONE_TOKEN", + "JUJU_UNITS_INFO", + "CLOUD_METADATA", + "COMPUTER_TAGS", + "UBUNTU_PRO_INFO", + "LIVEPATCH", + "UBUNTU_PRO_REBOOT_REQUIRED", + "SNAPS", +] # When adding a new schema, which deprecates an older schema, the recommended @@ -31,160 +71,234 @@ # USERS_2_1 -process_info = KeyDict({"pid": Int(), - "name": Unicode(), - "state": Bytes(), - "sleep-average": Int(), - "uid": Int(), - "gid": Int(), - "vm-size": Int(), - "start-time": Int(), - "percent-cpu": Float()}, - # Optional for backwards compatibility - optional=["vm-size", "sleep-average", "percent-cpu"]) +process_info = KeyDict( + { + "pid": Int(), + "name": Unicode(), + "state": Bytes(), + "sleep-average": Int(), + "uid": Int(), + "gid": Int(), + "vm-size": Int(), + "start-time": Int(), + "percent-cpu": Float(), + }, + # Optional for backwards compatibility + optional=["vm-size", "sleep-average", "percent-cpu"], +) ACTIVE_PROCESS_INFO = Message( "active-process-info", - {"kill-processes": List(Int()), - "kill-all-processes": Bool(), - "add-processes": List(process_info), - "update-processes": List(process_info)}, + { + "kill-processes": List(Int()), + "kill-all-processes": Bool(), + "add-processes": List(process_info), + "update-processes": List(process_info), + }, # XXX Really we don't want all three of these keys to be optional: # we always want _something_... - optional=["add-processes", "update-processes", "kill-processes", - "kill-all-processes"]) + optional=[ + "add-processes", + "update-processes", + "kill-processes", + "kill-all-processes", + ], +) COMPUTER_UPTIME = Message( "computer-uptime", - {"startup-times": List(Int()), - "shutdown-times": List(Int())}, + {"startup-times": List(Int()), "shutdown-times": List(Int())}, # XXX Again, one or the other. 
- optional=["startup-times", "shutdown-times"]) + optional=["startup-times", "shutdown-times"], +) CLIENT_UPTIME = Message( "client-uptime", - {"period": Tuple(Float(), Float()), - "components": List(Int())}, - optional=["components"]) # just for backwards compatibility + {"period": Tuple(Float(), Float()), "components": List(Int())}, + optional=["components"], +) # just for backwards compatibility OPERATION_RESULT = Message( "operation-result", - {"operation-id": Int(), - "status": Int(), - "result-code": Int(), - "result-text": Unicode()}, - optional=["result-code", "result-text"]) + { + "operation-id": Int(), + "status": Int(), + "result-code": Int(), + "result-text": Unicode(), + }, + optional=["result-code", "result-text"], +) COMPUTER_INFO = Message( "computer-info", - {"hostname": Unicode(), - "total-memory": Int(), - "total-swap": Int(), - "annotations": Dict(Unicode(), Unicode())}, + { + "hostname": Unicode(), + "total-memory": Int(), + "total-swap": Int(), + "annotations": Dict(Unicode(), Unicode()), + }, # Not sure why these are all optional, but it's explicitly tested # in the server - optional=["hostname", "total-memory", "total-swap", "annotations"]) + optional=["hostname", "total-memory", "total-swap", "annotations"], +) DISTRIBUTION_INFO = Message( "distribution-info", - {"distributor-id": Unicode(), - "description": Unicode(), - "release": Unicode(), - "code-name": Unicode()}, + { + "distributor-id": Unicode(), + "description": Unicode(), + "release": Unicode(), + "code-name": Unicode(), + }, # all optional because the lsb-release file may not have all data. - optional=["distributor-id", "description", "release", "code-name"]) + optional=["distributor-id", "description", "release", "code-name"], +) CLOUD_METADATA = Message( "cloud-instance-metadata", - {"instance-id": Unicode(), - "ami-id": Unicode(), - "instance-type": Unicode()}) - - -hal_data = Dict(Unicode(), - Any(Unicode(), List(Unicode()), Bool(), Int(), Float())) - -HARDWARE_INVENTORY = Message("hardware-inventory", { - "devices": List(Any(Tuple(Constant("create"), hal_data), - Tuple(Constant("update"), - Unicode(), # udi, - hal_data, # creates, - hal_data, # updates, - hal_data), # deletes - Tuple(Constant("delete"), - Unicode()), - ), - )}) - - -HARDWARE_INFO = Message("hardware-info", { - "data": Unicode()}) - -juju_data = {"environment-uuid": Unicode(), - "api-addresses": List(Unicode()), - "unit-name": Unicode(), - "private-address": Unicode()} + { + "instance-id": Unicode(), + "ami-id": Unicode(), + "instance-type": Unicode(), + }, +) + + +hal_data = Dict( + Unicode(), + Any(Unicode(), List(Unicode()), Bool(), Int(), Float()), +) + +HARDWARE_INVENTORY = Message( + "hardware-inventory", + { + "devices": List( + Any( + Tuple(Constant("create"), hal_data), + Tuple( + Constant("update"), + Unicode(), # udi, + hal_data, # creates, + hal_data, # updates, + hal_data, # deletes + ), + Tuple(Constant("delete"), Unicode()), + ), + ), + }, +) + + +HARDWARE_INFO = Message("hardware-info", {"data": Unicode()}) + +juju_data = { + "environment-uuid": Unicode(), + "api-addresses": List(Unicode()), + "unit-name": Unicode(), + "private-address": Unicode(), +} # The copy of juju_data is needed because Message mutates the dictionary -JUJU_UNITS_INFO = Message("juju-units-info", { - "juju-info-list": List(KeyDict(juju_data.copy(), - optional=["private-address"])) - }) - -LOAD_AVERAGE = Message("load-average", { - "load-averages": List(Tuple(Int(), Float())), - }) - -CPU_USAGE = Message("cpu-usage", { - "cpu-usages": 
List(Tuple(Int(), Float())), - }) - -CEPH_USAGE = Message("ceph-usage", { - "ring-id": Unicode(), - # Usage data points in the form (timestamp, size, avail, used) - "data-points": List(Tuple(Int(), Int(), Int(), Int())), - # Unused now, for backwards compatibility - "ceph-usages": List(None)}) - -SWIFT_DEVICE_INFO = Message("swift-device-info", { - "swift-device-info": List( - KeyDict({"device": Unicode(), "mounted": Bool()})) - }) - -SWIFT_USAGE = Message("swift-usage", { - # Usage data points in the form (timestamp, device, size, avail, used) - "data-points": List(Tuple(Int(), Unicode(), Int(), Int(), Int()))}) - -KEYSTONE_TOKEN = Message("keystone-token", { - "data": Any(Bytes(), Constant(None)) -}) - -MEMORY_INFO = Message("memory-info", { - "memory-info": List(Tuple(Float(), Int(), Int())), - }) +JUJU_UNITS_INFO = Message( + "juju-units-info", + { + "juju-info-list": List( + KeyDict(juju_data.copy(), optional=["private-address"]), + ), + }, +) + +LOAD_AVERAGE = Message( + "load-average", + { + "load-averages": List(Tuple(Int(), Float())), + }, +) + +CPU_USAGE = Message( + "cpu-usage", + { + "cpu-usages": List(Tuple(Int(), Float())), + }, +) + +CEPH_USAGE = Message( + "ceph-usage", + { + "ring-id": Unicode(), + # Usage data points in the form (timestamp, size, avail, used) + "data-points": List(Tuple(Int(), Int(), Int(), Int())), + # Unused now, for backwards compatibility + "ceph-usages": List(None), + }, +) + +SWIFT_DEVICE_INFO = Message( + "swift-device-info", + { + "swift-device-info": List( + KeyDict({"device": Unicode(), "mounted": Bool()}), + ), + }, +) + +SWIFT_USAGE = Message( + "swift-usage", + { + # Usage data points in the form (timestamp, device, size, avail, used) + "data-points": List(Tuple(Int(), Unicode(), Int(), Int(), Int())), + }, +) + +KEYSTONE_TOKEN = Message( + "keystone-token", + {"data": Any(Bytes(), Constant(None))}, +) + +MEMORY_INFO = Message( + "memory-info", + { + "memory-info": List(Tuple(Float(), Int(), Int())), + }, +) RESYNCHRONIZE = Message( "resynchronize", {"operation-id": Int()}, # operation-id is only there if it's a response to a server-initiated # resynchronize. - optional=["operation-id"]) - -MOUNT_ACTIVITY = Message("mount-activity", { - "activities": List(Tuple(Float(), Unicode(), Bool()))}) - - -MOUNT_INFO = Message("mount-info", { - "mount-info": List(Tuple(Float(), - KeyDict({"mount-point": Unicode(), - "device": Unicode(), - "filesystem": Unicode(), - "total-space": Int()}) - )), - }) + optional=["operation-id"], +) -FREE_SPACE = Message("free-space", { - "free-space": List(Tuple(Float(), Unicode(), Int()))}) +MOUNT_ACTIVITY = Message( + "mount-activity", + {"activities": List(Tuple(Float(), Unicode(), Bool()))}, +) + + +MOUNT_INFO = Message( + "mount-info", + { + "mount-info": List( + Tuple( + Float(), + KeyDict( + { + "mount-point": Unicode(), + "device": Unicode(), + "filesystem": Unicode(), + "total-space": Int(), + }, + ), + ), + ), + }, +) + +FREE_SPACE = Message( + "free-space", + {"free-space": List(Tuple(Float(), Unicode(), Int()))}, +) REGISTER = Message( @@ -192,16 +306,25 @@ # The term used in the UI is actually 'registration_key', but we keep # the message schema field as 'registration_password' in case a new # client contacts an older server. 
- {"registration_password": Any(Unicode(), Constant(None)), - "computer_title": Unicode(), - "hostname": Unicode(), - "account_name": Unicode(), - "tags": Any(Unicode(), Constant(None)), - "vm-info": Bytes(), - "container-info": Unicode(), - "access_group": Unicode()}, - optional=["registration_password", "hostname", "tags", "vm-info", - "container-info", "access_group"]) + { + "registration_password": Any(Unicode(), Constant(None)), + "computer_title": Unicode(), + "hostname": Unicode(), + "account_name": Unicode(), + "tags": Any(Unicode(), Constant(None)), + "vm-info": Bytes(), + "container-info": Unicode(), + "access_group": Unicode(), + }, + optional=[ + "registration_password", + "hostname", + "tags", + "vm-info", + "container-info", + "access_group", + ], +) REGISTER_3_3 = Message( @@ -209,23 +332,38 @@ # The term used in the UI is actually 'registration_key', but we keep # the message schema field as 'registration_password' in case a new # client contacts an older server. - {"registration_password": Any(Unicode(), Constant(None)), - "computer_title": Unicode(), - "hostname": Unicode(), - "account_name": Unicode(), - "tags": Any(Unicode(), Constant(None)), - "vm-info": Bytes(), - "container-info": Unicode(), - "juju-info": KeyDict({"environment-uuid": Unicode(), - "api-addresses": List(Unicode()), - "machine-id": Unicode()}), - "access_group": Unicode(), - "clone_secure_id": Any(Unicode(), Constant(None)), - "ubuntu_pro_info": Unicode()}, + { + "registration_password": Any(Unicode(), Constant(None)), + "computer_title": Unicode(), + "hostname": Unicode(), + "account_name": Unicode(), + "tags": Any(Unicode(), Constant(None)), + "vm-info": Bytes(), + "container-info": Unicode(), + "juju-info": KeyDict( + { + "environment-uuid": Unicode(), + "api-addresses": List(Unicode()), + "machine-id": Unicode(), + }, + ), + "access_group": Unicode(), + "clone_secure_id": Any(Unicode(), Constant(None)), + "ubuntu_pro_info": Unicode(), + }, api=b"3.3", - optional=["registration_password", "hostname", "tags", "vm-info", - "container-info", "access_group", "juju-info", - "clone_secure_id", "ubuntu_pro_info"]) + optional=[ + "registration_password", + "hostname", + "tags", + "vm-info", + "container-info", + "access_group", + "juju-info", + "clone_secure_id", + "ubuntu_pro_info", + ], +) # XXX The register-provisioned-machine message is obsolete, it's kept around @@ -233,7 +371,8 @@ # to have it is 14.07). Eventually it shall be dropped. REGISTER_PROVISIONED_MACHINE = Message( "register-provisioned-machine", - {"otp": Bytes()}) + {"otp": Bytes()}, +) # XXX The register-cloud-vm message is obsolete, it's kept around just to not @@ -241,242 +380,329 @@ # is 14.07). Eventually it shall be dropped. 
REGISTER_CLOUD_VM = Message( "register-cloud-vm", - {"hostname": Unicode(), - "otp": Any(Bytes(), Constant(None)), - "instance_key": Unicode(), - "account_name": Any(Unicode(), Constant(None)), - "registration_password": Any(Unicode(), Constant(None)), - "reservation_key": Unicode(), - "public_hostname": Unicode(), - "local_hostname": Unicode(), - "kernel_key": Any(Unicode(), Constant(None)), - "ramdisk_key": Any(Unicode(), Constant(None)), - "launch_index": Int(), - "image_key": Unicode(), - "tags": Any(Unicode(), Constant(None)), - "vm-info": Bytes(), - "public_ipv4": Unicode(), - "local_ipv4": Unicode(), - "access_group": Unicode()}, - optional=["tags", "vm-info", "public_ipv4", "local_ipv4", "access_group"]) - - -TEMPERATURE = Message("temperature", { - "thermal-zone": Unicode(), - "temperatures": List(Tuple(Int(), Float())), - }) + { + "hostname": Unicode(), + "otp": Any(Bytes(), Constant(None)), + "instance_key": Unicode(), + "account_name": Any(Unicode(), Constant(None)), + "registration_password": Any(Unicode(), Constant(None)), + "reservation_key": Unicode(), + "public_hostname": Unicode(), + "local_hostname": Unicode(), + "kernel_key": Any(Unicode(), Constant(None)), + "ramdisk_key": Any(Unicode(), Constant(None)), + "launch_index": Int(), + "image_key": Unicode(), + "tags": Any(Unicode(), Constant(None)), + "vm-info": Bytes(), + "public_ipv4": Unicode(), + "local_ipv4": Unicode(), + "access_group": Unicode(), + }, + optional=["tags", "vm-info", "public_ipv4", "local_ipv4", "access_group"], +) + + +TEMPERATURE = Message( + "temperature", + { + "thermal-zone": Unicode(), + "temperatures": List(Tuple(Int(), Float())), + }, +) PROCESSOR_INFO = Message( "processor-info", - {"processors": List(KeyDict({"processor-id": Int(), - "vendor": Unicode(), - "model": Unicode(), - "cache-size": Int(), - }, - optional=["vendor", "cache-size"]))}) - -user_data = KeyDict({ - "uid": Int(), - "username": Unicode(), - "name": Any(Unicode(), Constant(None)), - "enabled": Bool(), - "location": Any(Unicode(), Constant(None)), - "home-phone": Any(Unicode(), Constant(None)), - "work-phone": Any(Unicode(), Constant(None)), - "primary-gid": Any(Int(), Constant(None)), - "primary-groupname": Unicode()}, - optional=["primary-groupname", "primary-gid"]) - -group_data = KeyDict({ - "gid": Int(), - "name": Unicode()}) + { + "processors": List( + KeyDict( + { + "processor-id": Int(), + "vendor": Unicode(), + "model": Unicode(), + "cache-size": Int(), + }, + optional=["vendor", "cache-size"], + ), + ), + }, +) + +user_data = KeyDict( + { + "uid": Int(), + "username": Unicode(), + "name": Any(Unicode(), Constant(None)), + "enabled": Bool(), + "location": Any(Unicode(), Constant(None)), + "home-phone": Any(Unicode(), Constant(None)), + "work-phone": Any(Unicode(), Constant(None)), + "primary-gid": Any(Int(), Constant(None)), + "primary-groupname": Unicode(), + }, + optional=["primary-groupname", "primary-gid"], +) + +group_data = KeyDict({"gid": Int(), "name": Unicode()}) USERS = Message( "users", - {"operation-id": Int(), - "create-users": List(user_data), - "update-users": List(user_data), - "delete-users": List(Unicode()), - "create-groups": List(group_data), - "update-groups": List(group_data), - "delete-groups": List(Unicode()), - "create-group-members": Dict(Unicode(), List(Unicode())), - "delete-group-members": Dict(Unicode(), List(Unicode())), - }, + { + "operation-id": Int(), + "create-users": List(user_data), + "update-users": List(user_data), + "delete-users": List(Unicode()), + "create-groups": 
List(group_data), + "update-groups": List(group_data), + "delete-groups": List(Unicode()), + "create-group-members": Dict(Unicode(), List(Unicode())), + "delete-group-members": Dict(Unicode(), List(Unicode())), + }, # operation-id is only there for responses, and all other are # optional as long as one of them is there (no way to say that yet) - optional=["operation-id", "create-users", "update-users", "delete-users", - "create-groups", "update-groups", "delete-groups", - "create-group-members", "delete-group-members"]) + optional=[ + "operation-id", + "create-users", + "update-users", + "delete-users", + "create-groups", + "update-groups", + "delete-groups", + "create-group-members", + "delete-group-members", + ], +) USERS_2_1 = Message( "users", - {"operation-id": Int(), - "create-users": List(user_data), - "update-users": List(user_data), - "delete-users": List(Int()), - "create-groups": List(group_data), - "update-groups": List(group_data), - "delete-groups": List(Int()), - "create-group-members": Dict(Int(), List(Int())), - "delete-group-members": Dict(Int(), List(Int())), - }, + { + "operation-id": Int(), + "create-users": List(user_data), + "update-users": List(user_data), + "delete-users": List(Int()), + "create-groups": List(group_data), + "update-groups": List(group_data), + "delete-groups": List(Int()), + "create-group-members": Dict(Int(), List(Int())), + "delete-group-members": Dict(Int(), List(Int())), + }, # operation-id is only there for responses, and all other are # optional as long as one of them is there (no way to say that yet) - optional=["operation-id", "create-users", "update-users", "delete-users", - "create-groups", "update-groups", "delete-groups", - "create-group-members", "delete-group-members"]) + optional=[ + "operation-id", + "create-users", + "update-users", + "delete-users", + "create-groups", + "update-groups", + "delete-groups", + "create-group-members", + "delete-group-members", + ], +) USERS_2_0 = Message( "users", - {"operation-id": Int(), - "create-users": List(user_data), - "update-users": List(user_data), - "delete-users": List(Int()), - "create-groups": List(group_data), - "update-groups": List(group_data), - "delete-groups": List(Int()), - "create-group-members": Dict(Int(), List(Int())), - "delete-group-members": Dict(Int(), List(Int())), - }, + { + "operation-id": Int(), + "create-users": List(user_data), + "update-users": List(user_data), + "delete-users": List(Int()), + "create-groups": List(group_data), + "update-groups": List(group_data), + "delete-groups": List(Int()), + "create-group-members": Dict(Int(), List(Int())), + "delete-group-members": Dict(Int(), List(Int())), + }, # operation-id is only there for responses, and all other are # optional as long as one of them is there (no way to say that yet) - optional=["operation-id", "create-users", "update-users", "delete-users", - "create-groups", "update-groups", "delete-groups", - "create-group-members", "delete-group-members"]) + optional=[ + "operation-id", + "create-users", + "update-users", + "delete-users", + "create-groups", + "update-groups", + "delete-groups", + "create-group-members", + "delete-group-members", + ], +) opt_str = Any(Unicode(), Constant(None)) OLD_USERS = Message( "users", - {"users": List(KeyDict({"username": Unicode(), - "uid": Int(), - "realname": opt_str, - "location": opt_str, - "home-phone": opt_str, - "work-phone": opt_str, - "enabled": Bool()}, - optional=["location", "home-phone", "work-phone"])), - "groups": List(KeyDict({"gid": Int(), - "name": 
Unicode(), - "members": List(Unicode())}))}, - optional=["groups"]) + { + "users": List( + KeyDict( + { + "username": Unicode(), + "uid": Int(), + "realname": opt_str, + "location": opt_str, + "home-phone": opt_str, + "work-phone": opt_str, + "enabled": Bool(), + }, + optional=["location", "home-phone", "work-phone"], + ), + ), + "groups": List( + KeyDict( + {"gid": Int(), "name": Unicode(), "members": List(Unicode())}, + ), + ), + }, + optional=["groups"], +) package_ids_or_ranges = List(Any(Tuple(Int(), Int()), Int())) PACKAGES = Message( "packages", - {"installed": package_ids_or_ranges, - "available": package_ids_or_ranges, - "available-upgrades": package_ids_or_ranges, - "locked": package_ids_or_ranges, - "autoremovable": package_ids_or_ranges, - "not-autoremovable": package_ids_or_ranges, - "security": package_ids_or_ranges, - "not-installed": package_ids_or_ranges, - "not-available": package_ids_or_ranges, - "not-available-upgrades": package_ids_or_ranges, - "not-locked": package_ids_or_ranges, - "not-security": package_ids_or_ranges}, - optional=["installed", "available", "available-upgrades", "locked", - "not-available", "not-installed", "not-available-upgrades", - "not-locked", "autoremovable", "not-autoremovable", "security", - "not-security"]) + { + "installed": package_ids_or_ranges, + "available": package_ids_or_ranges, + "available-upgrades": package_ids_or_ranges, + "locked": package_ids_or_ranges, + "autoremovable": package_ids_or_ranges, + "not-autoremovable": package_ids_or_ranges, + "security": package_ids_or_ranges, + "not-installed": package_ids_or_ranges, + "not-available": package_ids_or_ranges, + "not-available-upgrades": package_ids_or_ranges, + "not-locked": package_ids_or_ranges, + "not-security": package_ids_or_ranges, + }, + optional=[ + "installed", + "available", + "available-upgrades", + "locked", + "not-available", + "not-installed", + "not-available-upgrades", + "not-locked", + "autoremovable", + "not-autoremovable", + "security", + "not-security", + ], +) package_locks = List(Tuple(Unicode(), Unicode(), Unicode())) PACKAGE_LOCKS = Message( "package-locks", - {"created": package_locks, - "deleted": package_locks}, - optional=["created", "deleted"]) + {"created": package_locks, "deleted": package_locks}, + optional=["created", "deleted"], +) CHANGE_PACKAGE_HOLDS = Message( "change-package-holds", - {"created": List(Unicode()), - "deleted": List(Unicode())}, - optional=["created", "deleted"]) + {"created": List(Unicode()), "deleted": List(Unicode())}, + optional=["created", "deleted"], +) CHANGE_PACKAGES_RESULT = Message( "change-packages-result", - {"operation-id": Int(), - "must-install": List(Any(Int(), Constant(None))), - "must-remove": List(Any(Int(), Constant(None))), - "result-code": Int(), - "result-text": Unicode()}, - optional=["result-text", "must-install", "must-remove"]) - -UNKNOWN_PACKAGE_HASHES = Message("unknown-package-hashes", { - "hashes": List(Bytes()), - "request-id": Int(), - }) + { + "operation-id": Int(), + "must-install": List(Any(Int(), Constant(None))), + "must-remove": List(Any(Int(), Constant(None))), + "result-code": Int(), + "result-text": Unicode(), + }, + optional=["result-text", "must-install", "must-remove"], +) + +UNKNOWN_PACKAGE_HASHES = Message( + "unknown-package-hashes", + { + "hashes": List(Bytes()), + "request-id": Int(), + }, +) PACKAGE_REPORTER_RESULT = Message( - "package-reporter-result", { - "report-timestamp": Float(), - "code": Int(), - "err": Unicode()}, - optional=["report-timestamp"]) - -ADD_PACKAGES = 
Message("add-packages", { - "packages": List(KeyDict({"name": Unicode(), - "description": Unicode(), - "section": Unicode(), - "relations": List(Tuple(Int(), Unicode())), - "summary": Unicode(), - "installed-size": Any(Int(), Constant(None)), - "size": Any(Int(), Constant(None)), - "version": Unicode(), - "type": Int(), - })), - "request-id": Int(), - }) + "package-reporter-result", + {"report-timestamp": Float(), "code": Int(), "err": Unicode()}, + optional=["report-timestamp"], +) + +ADD_PACKAGES = Message( + "add-packages", + { + "packages": List( + KeyDict( + { + "name": Unicode(), + "description": Unicode(), + "section": Unicode(), + "relations": List(Tuple(Int(), Unicode())), + "summary": Unicode(), + "installed-size": Any(Int(), Constant(None)), + "size": Any(Int(), Constant(None)), + "version": Unicode(), + "type": Int(), + }, + ), + ), + "request-id": Int(), + }, +) -TEXT_MESSAGE = Message("text-message", { - "message": Unicode()}) +TEXT_MESSAGE = Message("text-message", {"message": Unicode()}) TEST = Message( "test", - {"greeting": Bytes(), - "consistency-error": Bool(), - "echo": Bytes(), - "sequence": Int()}, - optional=["greeting", "consistency-error", "echo", "sequence"]) + { + "greeting": Bytes(), + "consistency-error": Bool(), + "echo": Bytes(), + "sequence": Int(), + }, + optional=["greeting", "consistency-error", "echo", "sequence"], +) # The tuples are timestamp, value -GRAPH_DATA = KeyDict({"values": List(Tuple(Float(), Float())), - "error": Unicode(), - "script-hash": Bytes()}) +GRAPH_DATA = KeyDict( + { + "values": List(Tuple(Float(), Float())), + "error": Unicode(), + "script-hash": Bytes(), + }, +) -CUSTOM_GRAPH = Message("custom-graph", { - "data": Dict(Int(), GRAPH_DATA)}) +CUSTOM_GRAPH = Message("custom-graph", {"data": Dict(Int(), GRAPH_DATA)}) # XXX This is kept for backward compatibility, it can eventually be removed # when all clients will support REBOOT_REQUIRED_INFO -REBOOT_REQUIRED = Message( - "reboot-required", - {"flag": Bool()}) +REBOOT_REQUIRED = Message("reboot-required", {"flag": Bool()}) REBOOT_REQUIRED_INFO = Message( "reboot-required-info", - {"flag": Bool(), - "packages": List(Unicode())}, - optional=["flag", "packages"]) + {"flag": Bool(), "packages": List(Unicode())}, + optional=["flag", "packages"], +) APT_PREFERENCES = Message( "apt-preferences", - {"data": Any(Dict(Unicode(), Unicode()), Constant(None))}) + {"data": Any(Dict(Unicode(), Unicode()), Constant(None))}, +) EUCALYPTUS_INFO = Message( "eucalyptus-info", - {"basic_info": Dict(Bytes(), Any(Bytes(), Constant(None))), - "walrus_info": Bytes(), - "cluster_controller_info": Bytes(), - "storage_controller_info": Bytes(), - "node_controller_info": Bytes(), - "capacity_info": Bytes()}, - optional=["capacity_info"]) - -EUCALYPTUS_INFO_ERROR = Message( - "eucalyptus-info-error", - {"error": Bytes()}) + { + "basic_info": Dict(Bytes(), Any(Bytes(), Constant(None))), + "walrus_info": Bytes(), + "cluster_controller_info": Bytes(), + "storage_controller_info": Bytes(), + "node_controller_info": Bytes(), + "capacity_info": Bytes(), + }, + optional=["capacity_info"], +) + +EUCALYPTUS_INFO_ERROR = Message("eucalyptus-info-error", {"error": Bytes()}) # The network-device message is split in two top level keys because we don't # support adding sub-keys in a backwards-compatible way (only top-level keys). @@ -484,17 +710,25 @@ # simply ignore the extra info.. 
NETWORK_DEVICE = Message( "network-device", - {"devices": List(KeyDict({"interface": Bytes(), - "ip_address": Bytes(), - "mac_address": Bytes(), - "broadcast_address": Bytes(), - "netmask": Bytes(), - "flags": Int()})), - - "device-speeds": List(KeyDict({"interface": Bytes(), - "speed": Int(), - "duplex": Bool()}))}, - optional=["device-speeds"]) + { + "devices": List( + KeyDict( + { + "interface": Bytes(), + "ip_address": Bytes(), + "mac_address": Bytes(), + "broadcast_address": Bytes(), + "netmask": Bytes(), + "flags": Int(), + }, + ), + ), + "device-speeds": List( + KeyDict({"interface": Bytes(), "speed": Int(), "duplex": Bool()}), + ), + }, + optional=["device-speeds"], +) NETWORK_ACTIVITY = Message( @@ -503,29 +737,103 @@ # an interface a is a list of 3-tuples (step, in, out), where 'step' is the # time interval and 'in'/'out' are number of bytes received/sent over the # interval. - {"activities": Dict(Bytes(), List(Tuple(Int(), Int(), Int())))}) + {"activities": Dict(Bytes(), List(Tuple(Int(), Int(), Int())))}, +) UPDATE_MANAGER_INFO = Message("update-manager-info", {"prompt": Unicode()}) COMPUTER_TAGS = Message( "computer-tags", - {"tags": Any(Unicode(), Constant(None))}) + {"tags": Any(Unicode(), Constant(None))}, +) + +UBUNTU_PRO_INFO = Message("ubuntu-pro-info", {"ubuntu-pro-info": Unicode()}) + +LIVEPATCH = Message("livepatch", {"livepatch": Unicode()}) -UBUNTU_PRO_INFO = Message( - "ubuntu-pro-info", - {"ubuntu-pro-info": Unicode()}) +UBUNTU_PRO_REBOOT_REQUIRED = Message( + "ubuntu-pro-reboot-required", + {"ubuntu-pro-reboot-required": Unicode()}, +) + +SNAPS = Message( + "snaps", + { + "snaps": KeyDict( + { + "installed": List( + KeyDict( + { + "id": Unicode(), + "name": Unicode(), + "version": Unicode(), + "revision": Unicode(), + "tracking-channel": Unicode(), + "hold": Unicode(), + "publisher": KeyDict( + { + "username": Unicode(), + "validation": Unicode(), + }, + strict=False, + ), + "confinement": Unicode(), + "summary": Unicode(), + }, + strict=False, + optional=["hold", "summary"], + ), + ), + }, + ), + }, +) message_schemas = ( - ACTIVE_PROCESS_INFO, COMPUTER_UPTIME, CLIENT_UPTIME, - OPERATION_RESULT, COMPUTER_INFO, DISTRIBUTION_INFO, - HARDWARE_INVENTORY, HARDWARE_INFO, LOAD_AVERAGE, MEMORY_INFO, - RESYNCHRONIZE, MOUNT_ACTIVITY, MOUNT_INFO, FREE_SPACE, - REGISTER, REGISTER_3_3, - TEMPERATURE, PROCESSOR_INFO, USERS, PACKAGES, PACKAGE_LOCKS, - CHANGE_PACKAGES_RESULT, UNKNOWN_PACKAGE_HASHES, - ADD_PACKAGES, PACKAGE_REPORTER_RESULT, TEXT_MESSAGE, TEST, - CUSTOM_GRAPH, REBOOT_REQUIRED, APT_PREFERENCES, - NETWORK_DEVICE, NETWORK_ACTIVITY, - REBOOT_REQUIRED_INFO, UPDATE_MANAGER_INFO, CPU_USAGE, - CEPH_USAGE, SWIFT_USAGE, SWIFT_DEVICE_INFO, KEYSTONE_TOKEN, - JUJU_UNITS_INFO, CLOUD_METADATA, COMPUTER_TAGS, UBUNTU_PRO_INFO) + ACTIVE_PROCESS_INFO, + COMPUTER_UPTIME, + CLIENT_UPTIME, + OPERATION_RESULT, + COMPUTER_INFO, + DISTRIBUTION_INFO, + HARDWARE_INVENTORY, + HARDWARE_INFO, + LOAD_AVERAGE, + MEMORY_INFO, + RESYNCHRONIZE, + MOUNT_ACTIVITY, + MOUNT_INFO, + FREE_SPACE, + REGISTER, + REGISTER_3_3, + TEMPERATURE, + PROCESSOR_INFO, + USERS, + PACKAGES, + PACKAGE_LOCKS, + CHANGE_PACKAGES_RESULT, + UNKNOWN_PACKAGE_HASHES, + ADD_PACKAGES, + PACKAGE_REPORTER_RESULT, + TEXT_MESSAGE, + TEST, + CUSTOM_GRAPH, + REBOOT_REQUIRED, + APT_PREFERENCES, + NETWORK_DEVICE, + NETWORK_ACTIVITY, + REBOOT_REQUIRED_INFO, + UPDATE_MANAGER_INFO, + CPU_USAGE, + CEPH_USAGE, + SWIFT_USAGE, + SWIFT_DEVICE_INFO, + KEYSTONE_TOKEN, + JUJU_UNITS_INFO, + CLOUD_METADATA, + COMPUTER_TAGS, + UBUNTU_PRO_INFO, + 
LIVEPATCH, + UBUNTU_PRO_REBOOT_REQUIRED, + SNAPS, +) diff -Nru landscape-client-23.02/landscape/message_schemas/test_message.py landscape-client-23.08/landscape/message_schemas/test_message.py --- landscape-client-23.02/landscape/message_schemas/test_message.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/message_schemas/test_message.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,17 +1,18 @@ import unittest -from landscape.lib.schema import Constant, Int +from landscape.lib.schema import Constant +from landscape.lib.schema import Int from landscape.message_schemas.message import Message class MessageTest(unittest.TestCase): - def test_coerce(self): """The L{Message} schema should be very similar to KeyDict.""" schema = Message("foo", {"data": Int()}) self.assertEqual( schema.coerce({"type": "foo", "data": 3}), - {"type": "foo", "data": 3}) + {"type": "foo", "data": 3}, + ) def test_coerce_bytes_to_str(self): """ @@ -26,21 +27,24 @@ schema = Message("bar", {}) self.assertEqual( schema.coerce({"type": "bar", "timestamp": 0.33}), - {"type": "bar", "timestamp": 0.33}) + {"type": "bar", "timestamp": 0.33}, + ) def test_api(self): """L{Message} schemas should accept C{api} keys.""" schema = Message("baz", {}) self.assertEqual( schema.coerce({"type": "baz", "api": b"whatever"}), - {"type": "baz", "api": b"whatever"}) + {"type": "baz", "api": b"whatever"}, + ) - def test_api_None(self): + def test_api_none(self): """L{Message} schemas should accept None for C{api}.""" schema = Message("baz", {}) self.assertEqual( schema.coerce({"type": "baz", "api": None}), - {"type": "baz", "api": None}) + {"type": "baz", "api": None}, + ) def test_optional(self): """The L{Message} schema should allow additional optional keys.""" @@ -57,5 +61,7 @@ The L{Message} schema discards unknown fields when coercing values. 
""" schema = Message("foo", {}) - self.assertEqual({"type": "foo"}, - schema.coerce({"type": "foo", "crap": 123})) + self.assertEqual( + {"type": "foo"}, + schema.coerce({"type": "foo", "crap": 123}), + ) diff -Nru landscape-client-23.02/landscape/sysinfo/deployment.py landscape-client-23.08/landscape/sysinfo/deployment.py --- landscape-client-23.02/landscape/sysinfo/deployment.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/deployment.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,19 +1,29 @@ """Deployment code for the sysinfo tool.""" import os import sys -from logging import getLogger, Formatter +from logging import Formatter +from logging import getLogger from logging.handlers import RotatingFileHandler +from twisted.internet.defer import Deferred +from twisted.internet.defer import maybeDeferred from twisted.python.reflect import namedClass -from twisted.internet.defer import Deferred, maybeDeferred from landscape import VERSION from landscape.lib.config import BaseConfiguration -from landscape.sysinfo.sysinfo import SysInfoPluginRegistry, format_sysinfo +from landscape.sysinfo.sysinfo import format_sysinfo +from landscape.sysinfo.sysinfo import SysInfoPluginRegistry -ALL_PLUGINS = ["Load", "Disk", "Memory", "Temperature", "Processes", - "LoggedInUsers", "Network"] +ALL_PLUGINS = [ + "Load", + "Disk", + "Memory", + "Temperature", + "Processes", + "LoggedInUsers", + "Network", +] class SysInfoConfiguration(BaseConfiguration): @@ -24,13 +34,14 @@ default_config_filenames = ("/etc/landscape/client.conf",) if os.getuid() != 0: default_config_filenames += ( - os.path.expanduser("~/.landscape/sysinfo.conf"),) + os.path.expanduser("~/.landscape/sysinfo.conf"), + ) default_data_dir = "/var/lib/landscape/client/" config_section = "sysinfo" def __init__(self): - super(SysInfoConfiguration, self).__init__() + super().__init__() self._command_line_defaults["config"] = None @@ -39,18 +50,31 @@ Specialize L{Configuration.make_parser}, adding any sysinfo-specific options. """ - parser = super(SysInfoConfiguration, self).make_parser() + parser = super().make_parser() - parser.add_option("--sysinfo-plugins", metavar="PLUGIN_LIST", - help="Comma-delimited list of sysinfo plugins to " - "use. Default is to use all plugins.") - - parser.add_option("--exclude-sysinfo-plugins", metavar="PLUGIN_LIST", - help="Comma-delimited list of sysinfo plugins to " - "NOT use. This always take precedence over " - "plugins to include.") + parser.add_option( + "--sysinfo-plugins", + metavar="PLUGIN_LIST", + help="Comma-delimited list of sysinfo plugins to " + "use. Default is to use all plugins.", + ) + + parser.add_option( + "--exclude-sysinfo-plugins", + metavar="PLUGIN_LIST", + help="Comma-delimited list of sysinfo plugins to " + "NOT use. 
This always take precedence over " + "plugins to include.", + ) + + parser.add_option( + "--width", + type=int, + default=80, + help="Maximum width for each column of output.", + ) - parser.epilog = "Default plugins: %s" % (", ".join(ALL_PLUGINS)) + parser.epilog = "Default plugins: {}".format(", ".join(ALL_PLUGINS)) return parser def get_plugin_names(self, plugin_spec): @@ -66,9 +90,12 @@ else: exclude = self.get_plugin_names(self.exclude_sysinfo_plugins) plugins = [x for x in include if x not in exclude] - return [namedClass("landscape.sysinfo.%s.%s" - % (plugin_name.lower(), plugin_name))() - for plugin_name in plugins] + return [ + namedClass( + f"landscape.sysinfo.{plugin_name.lower()}.{plugin_name}", + )() + for plugin_name in plugins + ] def get_landscape_log_directory(landscape_dir=None): @@ -91,8 +118,11 @@ if not os.path.isdir(landscape_dir): os.mkdir(landscape_dir) log_filename = os.path.join(landscape_dir, "sysinfo.log") - handler = RotatingFileHandler(log_filename, - maxBytes=500 * 1024, backupCount=1) + handler = RotatingFileHandler( + log_filename, + maxBytes=500 * 1024, + backupCount=1, + ) logger.addHandler(handler) handler.setFormatter(Formatter("%(asctime)s %(levelname)-8s %(message)s")) @@ -103,8 +133,8 @@ """ try: setup_logging() - except IOError as e: - sys.exit("Unable to setup logging. %s" % e) + except OSError as e: + sys.exit(f"Unable to setup logging. {e}") if sysinfo is None: sysinfo = SysInfoPluginRegistry() @@ -116,8 +146,15 @@ sysinfo.add(plugin) def show_output(result): - print(format_sysinfo(sysinfo.get_headers(), sysinfo.get_notes(), - sysinfo.get_footnotes(), indent=" ")) + print( + format_sysinfo( + sysinfo.get_headers(), + sysinfo.get_notes(), + sysinfo.get_footnotes(), + width=config.width, + indent=" ", + ), + ) def run_sysinfo(): return sysinfo.run().addCallback(show_output) @@ -128,7 +165,8 @@ # running. done = Deferred() reactor.callWhenRunning( - lambda: maybeDeferred(run_sysinfo).chainDeferred(done)) + lambda: maybeDeferred(run_sysinfo).chainDeferred(done), + ) def stop_reactor(result): # We won't need to use callLater here once we use Twisted >8. 
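[Editor's note — illustrative sketch, not part of the patch. The deployment.py hunks above add a `--width` option (int, default 80) and thread `config.width` through to `format_sysinfo`. The snippet below shows, under the 23.08 module layout from this diff, how that plumbing is exercised; the header/note values are made-up sample data.]

    # Illustrative only: exercising the new --width plumbing from 23.08.
    # Equivalent on the command line: landscape-sysinfo --width 100
    from landscape.sysinfo.sysinfo import format_sysinfo

    # format_sysinfo takes (header, value) pairs plus notes/footnotes;
    # sample values below are invented for the example.
    headers = [
        ("System load", "0.08"),
        ("Usage of /home", "31.1% of 97.31GB"),
        ("Memory usage", "12%"),
        ("Swap usage", "0%"),
        ("Processes", "241"),
    ]

    # deployment.py now passes width=config.width here instead of relying
    # on the hardcoded default of 80, so columns fit the requested width.
    print(
        format_sysinfo(
            headers,
            notes=["There is 1 zombie process."],
            footnotes=["https://landscape.canonical.com/"],
            width=100,
            indent="  ",
        )
    )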
diff -Nru landscape-client-23.02/landscape/sysinfo/disk.py landscape-client-23.08/landscape/sysinfo/disk.py --- landscape-client-23.02/landscape/sysinfo/disk.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/disk.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,29 +1,30 @@ -from __future__ import division - import os from twisted.internet.defer import succeed -from landscape.lib.disk import (get_mount_info, get_filesystem_for_path) +from landscape.lib.disk import get_filesystem_for_path +from landscape.lib.disk import get_mount_info def format_megabytes(megabytes): if megabytes >= 1024 * 1024: - return "%.2fTB" % (megabytes / (1024 * 1024)) + return f"{megabytes / (1024 * 1024):.2f}TB" elif megabytes >= 1024: - return "%.2fGB" % (megabytes / 1024) + return f"{megabytes / 1024:.2f}GB" else: - return "%dMB" % (megabytes) + return f"{megabytes:d}MB" def usage(info): total = info["total-space"] used = total - info["free-space"] - return "%0.1f%% of %s" % ((used / total) * 100, format_megabytes(total)) - + return "{:0.1f}% of {}".format( + (used / total) * 100, + format_megabytes(total), + ) -class Disk(object): +class Disk: def __init__(self, mounts_file="/proc/mounts", statvfs=os.statvfs): self._mounts_file = mounts_file self._statvfs = statvfs @@ -32,13 +33,19 @@ self._sysinfo = sysinfo def run(self): - main_info = get_filesystem_for_path("/home", self._mounts_file, - self._statvfs) + main_info = get_filesystem_for_path( + "/home", + self._mounts_file, + self._statvfs, + ) if main_info is not None: total = main_info["total-space"] if total <= 0: root_main_info = get_filesystem_for_path( - "/", self._mounts_file, self._statvfs) + "/", + self._mounts_file, + self._statvfs, + ) if root_main_info is not None: total = root_main_info["total-space"] main_info = root_main_info @@ -46,8 +53,10 @@ main_usage = "unknown" else: main_usage = usage(main_info) - self._sysinfo.add_header("Usage of " + main_info["mount-point"], - main_usage) + self._sysinfo.add_header( + "Usage of " + main_info["mount-point"], + main_usage, + ) else: self._sysinfo.add_header("Usage of /home", "unknown") @@ -70,6 +79,7 @@ used = ((total - info["free-space"]) / total) * 100 if used >= 85: - self._sysinfo.add_note("%s is using %s" - % (info["mount-point"], usage(info))) + self._sysinfo.add_note( + "{} is using {}".format(info["mount-point"], usage(info)), + ) return succeed(None) diff -Nru landscape-client-23.02/landscape/sysinfo/landscapelink.py landscape-client-23.08/landscape/sysinfo/landscapelink.py --- landscape-client-23.02/landscape/sysinfo/landscapelink.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/landscapelink.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,13 +1,13 @@ from twisted.internet.defer import succeed -class LandscapeLink(object): - +class LandscapeLink: def register(self, sysinfo): self._sysinfo = sysinfo def run(self): self._sysinfo.add_footnote( "Graph this data and manage this system at:\n" - " https://landscape.canonical.com/") + " https://landscape.canonical.com/", + ) return succeed(None) diff -Nru landscape-client-23.02/landscape/sysinfo/load.py landscape-client-23.08/landscape/sysinfo/load.py --- landscape-client-23.02/landscape/sysinfo/load.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/load.py 2023-08-17 21:19:32.000000000 +0000 @@ -3,12 +3,13 @@ from twisted.internet.defer import succeed -class Load(object): - +class Load: def register(self, sysinfo): self._sysinfo = sysinfo def run(self): 
self._sysinfo.add_header( - "System load", str(round(os.getloadavg()[0], 2))) + "System load", + str(round(os.getloadavg()[0], 2)), + ) return succeed(None) diff -Nru landscape-client-23.02/landscape/sysinfo/loggedinusers.py landscape-client-23.08/landscape/sysinfo/loggedinusers.py --- landscape-client-23.02/landscape/sysinfo/loggedinusers.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/loggedinusers.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,7 @@ from landscape.lib.sysstats import get_logged_in_users -class LoggedInUsers(object): - +class LoggedInUsers: def register(self, sysinfo): self._sysinfo = sysinfo @@ -11,6 +10,7 @@ def add_header(logged_users): self._sysinfo.add_header("Users logged in", str(len(logged_users))) + result = get_logged_in_users() result.addCallback(add_header) result.addErrback(lambda failure: None) diff -Nru landscape-client-23.02/landscape/sysinfo/memory.py landscape-client-23.08/landscape/sysinfo/memory.py --- landscape-client-23.02/landscape/sysinfo/memory.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/memory.py 2023-08-17 21:19:32.000000000 +0000 @@ -3,8 +3,7 @@ from landscape.lib.sysstats import MemoryStats -class Memory(object): - +class Memory: def __init__(self, filename="/proc/meminfo"): self._filename = filename @@ -13,8 +12,12 @@ def run(self): memstats = MemoryStats(self._filename) - self._sysinfo.add_header("Memory usage", "%d%%" % - memstats.used_memory_percentage) - self._sysinfo.add_header("Swap usage", "%d%%" % - memstats.used_swap_percentage) + self._sysinfo.add_header( + "Memory usage", + f"{int(memstats.used_memory_percentage):d}%", + ) + self._sysinfo.add_header( + "Swap usage", + f"{int(memstats.used_swap_percentage):d}%", + ) return succeed(None) diff -Nru landscape-client-23.02/landscape/sysinfo/network.py landscape-client-23.08/landscape/sysinfo/network.py --- landscape-client-23.02/landscape/sysinfo/network.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/network.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,13 +1,14 @@ from functools import partial from operator import itemgetter -from netifaces import AF_INET, AF_INET6 +from netifaces import AF_INET +from netifaces import AF_INET6 from twisted.internet.defer import succeed from landscape.lib.network import get_active_device_info -class Network(object): +class Network: """Show information about active network interfaces. @param get_device_info: Optionally, a function that returns information @@ -16,8 +17,11 @@ def __init__(self, get_device_info=None): if get_device_info is None: - get_device_info = partial(get_active_device_info, - extended=True, default_only=True) + get_device_info = partial( + get_active_device_info, + extended=True, + default_only=True, + ) self._get_device_info = get_device_info def register(self, sysinfo): @@ -35,15 +39,19 @@ @return: A succeeded C{Deferred}. 
""" device_info = self._get_device_info() - for info in sorted(device_info, key=itemgetter('interface')): + for info in sorted(device_info, key=itemgetter("interface")): interface = info["interface"] ipv4_addresses = info["ip_addresses"].get(AF_INET, []) ipv6_addresses = info["ip_addresses"].get(AF_INET6, []) for addr in ipv4_addresses: self._sysinfo.add_header( - "IPv4 address for %s" % interface, addr['addr']) + f"IPv4 address for {interface}", + addr["addr"], + ) for addr in ipv6_addresses: self._sysinfo.add_header( - "IPv6 address for %s" % interface, addr['addr']) + f"IPv6 address for {interface}", + addr["addr"], + ) return succeed(None) diff -Nru landscape-client-23.02/landscape/sysinfo/processes.py landscape-client-23.08/landscape/sysinfo/processes.py --- landscape-client-23.02/landscape/sysinfo/processes.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/processes.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,10 +1,9 @@ -from twisted.internet.defer import succeed - -from landscape.lib.process import ProcessInformation +import os +from twisted.internet.defer import succeed -class Processes(object): +class Processes: def __init__(self, proc_dir="/proc"): self._proc_dir = proc_dir @@ -14,16 +13,27 @@ def run(self): num_processes = 0 num_zombies = 0 - info = ProcessInformation(proc_dir=self._proc_dir) - for process_info in info.get_all_process_info(): + for pid in os.listdir(self._proc_dir): + if not pid.isdigit(): + continue + status_path = os.path.join(self._proc_dir, pid, "stat") + + try: + with open(status_path, "rb") as fd: + data = fd.read() + except IOError: + continue + num_processes += 1 - if process_info["state"] == b"Z": + + if b"Z" == data.split(b" ", 3)[2]: num_zombies += 1 + if num_zombies: if num_zombies == 1: msg = "There is 1 zombie process." else: - msg = "There are %d zombie processes." % (num_zombies,) + msg = f"There are {num_zombies:d} zombie processes." self._sysinfo.add_note(msg) self._sysinfo.add_header("Processes", str(num_processes)) return succeed(None) diff -Nru landscape-client-23.02/landscape/sysinfo/sysinfo.py landscape-client-23.08/landscape/sysinfo/sysinfo.py --- landscape-client-23.02/landscape/sysinfo/sysinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/sysinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,7 +1,7 @@ -import textwrap -from logging import getLogger import math import os +import textwrap +from logging import getLogger from twisted.python.failure import Failure @@ -37,7 +37,7 @@ """ def __init__(self): - super(SysInfoPluginRegistry, self).__init__() + super().__init__() self._header_index = {} self._headers = [] self._notes = [] @@ -106,22 +106,31 @@ def _log_plugin_error(self, failure, plugin): self._plugin_error = True - message = "%s plugin raised an exception." % plugin.__class__.__name__ + message = f"{plugin.__class__.__name__} plugin raised an exception." logger = getLogger("landscape-sysinfo") log_failure(failure, message, logger=logger) def _report_error_note(self, result): from landscape.sysinfo.deployment import get_landscape_log_directory + if self._plugin_error: path = os.path.join(get_landscape_log_directory(), "sysinfo.log") self.add_note( "There were exceptions while processing one or more plugins. " - "See %s for more information." 
% path) + f"See {path} for more information.", + ) return result -def format_sysinfo(headers=(), notes=(), footnotes=(), width=80, indent="", - column_separator=" ", note_prefix="=> "): +def format_sysinfo( + headers=(), + notes=(), + footnotes=(), + width=80, + indent="", + column_separator=" ", + note_prefix="=> ", +): """Format sysinfo headers, notes and footnotes to be displayed. This function will format headers notes and footnotes in a way that @@ -151,9 +160,13 @@ # and then we go back from there until we can fit things. min_length = width for header, value in headers: - min_length = min(min_length, len(header)+len(value)+2) # 2 for ": " - columns = int(math.ceil(float(width) / - (min_length + len(column_separator)))) + min_length = min( + min_length, + len(header) + len(value) + 2, + ) # 2 for ": " + columns = int( + math.ceil(float(width) / (min_length + len(column_separator))), + ) # Okay, we've got a base for the number of columns. Now, since # columns may have different lengths, and the length of each column @@ -191,8 +204,9 @@ # Account for the spacing between each column. total_length += len(column_separator) - total_length += (widest_header_len + widest_value_len + - len(value_separator)) + total_length += ( + widest_header_len + widest_value_len + len(value_separator) + ) # Keep track of these lengths for building the output later. header_lengths.append((widest_header_len, widest_value_len)) @@ -225,13 +239,15 @@ # Add inter-column spacing. line += column_separator # And append the column to the current line. - line += (header + - value_separator + - " " * (widest_header_len - len(header)) + - value) + line += ( + header + + value_separator + + " " * (widest_header_len - len(header)) + + value + ) # If there are more columns in this line, pad it up so # that the next column's header is correctly aligned. 
- if headers_len > (column+1) * headers_per_column + row: + if headers_len > (column + 1) * headers_per_column + row: line += " " * (widest_value_len - len(value)) lines.append(line) @@ -242,10 +258,13 @@ initial_indent = indent + note_prefix for note in notes: lines.extend( - textwrap.wrap(note, - initial_indent=initial_indent, - subsequent_indent=" "*len(initial_indent), - width=width)) + textwrap.wrap( + note, + initial_indent=initial_indent, + subsequent_indent=" " * len(initial_indent), + width=width, + ), + ) if footnotes: if lines: diff -Nru landscape-client-23.02/landscape/sysinfo/temperature.py landscape-client-23.08/landscape/sysinfo/temperature.py --- landscape-client-23.02/landscape/sysinfo/temperature.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/temperature.py 2023-08-17 21:19:32.000000000 +0000 @@ -3,8 +3,7 @@ from landscape.lib.sysstats import get_thermal_zones -class Temperature(object): - +class Temperature: def __init__(self, thermal_zone_path=None): self._thermal_zone_path = thermal_zone_path @@ -15,9 +14,9 @@ temperature = None max_value = None for zone in get_thermal_zones(self._thermal_zone_path): - if (zone.temperature_value is not None and - (max_value is None or zone.temperature_value > max_value) - ): + if zone.temperature_value is not None and ( + max_value is None or zone.temperature_value > max_value + ): temperature = zone.temperature max_value = zone.temperature_value diff -Nru landscape-client-23.02/landscape/sysinfo/testplugin.py landscape-client-23.08/landscape/sysinfo/testplugin.py --- landscape-client-23.02/landscape/sysinfo/testplugin.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/testplugin.py 2023-08-17 21:19:32.000000000 +0000 @@ -4,8 +4,7 @@ current_instance = None -class TestPlugin(object): - +class TestPlugin: def __init__(self): self.sysinfo = None self.has_run = False diff -Nru landscape-client-23.02/landscape/sysinfo/tests/test_deployment.py landscape-client-23.08/landscape/sysinfo/tests/test_deployment.py --- landscape-client-23.02/landscape/sysinfo/tests/test_deployment.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/tests/test_deployment.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,28 +1,30 @@ -from logging.handlers import RotatingFileHandler -from logging import getLogger import os import unittest +from logging import getLogger +from logging.handlers import RotatingFileHandler +from unittest import mock -import mock from twisted.internet.defer import Deferred from landscape.lib.fs import create_text_file -from landscape.lib.testing import ( - ConfigTestCase, TwistedTestCase, HelperTestCase, StandardIOHelper) - -from landscape.sysinfo.deployment import ( - SysInfoConfiguration, ALL_PLUGINS, run, setup_logging, - get_landscape_log_directory) -from landscape.sysinfo.testplugin import TestPlugin -from landscape.sysinfo.sysinfo import SysInfoPluginRegistry -from landscape.sysinfo.load import Load +from landscape.lib.testing import ConfigTestCase +from landscape.lib.testing import HelperTestCase +from landscape.lib.testing import StandardIOHelper +from landscape.lib.testing import TwistedTestCase +from landscape.sysinfo.deployment import ALL_PLUGINS +from landscape.sysinfo.deployment import get_landscape_log_directory +from landscape.sysinfo.deployment import run +from landscape.sysinfo.deployment import setup_logging +from landscape.sysinfo.deployment import SysInfoConfiguration from landscape.sysinfo.landscapelink import LandscapeLink 
+from landscape.sysinfo.load import Load +from landscape.sysinfo.sysinfo import SysInfoPluginRegistry +from landscape.sysinfo.testplugin import TestPlugin class DeploymentTest(ConfigTestCase, unittest.TestCase): - def setUp(self): - super(DeploymentTest, self).setUp() + super().setUp() class TestConfiguration(SysInfoConfiguration): default_config_filenames = [self.makeFile("")] @@ -30,8 +32,9 @@ self.configuration = TestConfiguration() def test_get_plugins(self): - self.configuration.load(["--sysinfo-plugins", "Load,TestPlugin", - "-d", self.makeDir()]) + self.configuration.load( + ["--sysinfo-plugins", "Load,TestPlugin", "-d", self.makeDir()], + ) plugins = self.configuration.get_plugins() self.assertEqual(len(plugins), 2) self.assertTrue(isinstance(plugins[0], Load)) @@ -50,8 +53,9 @@ def test_exclude_plugins(self): exclude = ",".join(x for x in ALL_PLUGINS if x != "Load") - self.configuration.load(["--exclude-sysinfo-plugins", exclude, - "-d", self.makeDir()]) + self.configuration.load( + ["--exclude-sysinfo-plugins", exclude, "-d", self.makeDir()], + ) plugins = self.configuration.get_plugins() self.assertEqual(len(plugins), 1) self.assertTrue(isinstance(plugins[0], Load)) @@ -65,7 +69,7 @@ self.assertTrue(isinstance(plugins[0], TestPlugin)) -class FakeReactor(object): +class FakeReactor: """ Something that's simpler and more reusable than a bunch of mocked objects. """ @@ -75,31 +79,35 @@ self.scheduled_calls = [] self.running = False - def callWhenRunning(self, callable): + def callWhenRunning(self, callable): # noqa: N802 self.queued_calls.append(callable) def run(self): self.running = True - def callLater(self, seconds, callable, *args, **kwargs): + def callLater(self, seconds, callable, *args, **kwargs): # noqa: N802 self.scheduled_calls.append((seconds, callable, args, kwargs)) def stop(self): self.running = False -class RunTest(HelperTestCase, ConfigTestCase, TwistedTestCase, - unittest.TestCase): +class RunTest( + HelperTestCase, + ConfigTestCase, + TwistedTestCase, + unittest.TestCase, +): helpers = [StandardIOHelper] def setUp(self): - super(RunTest, self).setUp() + super().setUp() self._old_filenames = SysInfoConfiguration.default_config_filenames SysInfoConfiguration.default_config_filenames = [self.makeFile("")] def tearDown(self): - super(RunTest, self).tearDown() + super().tearDown() SysInfoConfiguration.default_config_filenames = self._old_filenames logger = getLogger("landscape-sysinfo") for handler in logger.handlers[:]: @@ -112,23 +120,29 @@ self.assertEqual(current_instance.has_run, True) sysinfo = current_instance.sysinfo - self.assertEqual(sysinfo.get_headers(), - [("Test header", "Test value")]) + self.assertEqual( + sysinfo.get_headers(), + [("Test header", "Test value")], + ) self.assertEqual(sysinfo.get_notes(), ["Test note"]) self.assertEqual(sysinfo.get_footnotes(), ["Test footnote"]) @mock.patch("landscape.sysinfo.deployment.format_sysinfo") def test_format_sysinfo_gets_correct_information(self, format_sysinfo): - run(["--sysinfo-plugins", "TestPlugin"]) + run(["--sysinfo-plugins", "TestPlugin", "--width", "100"]) format_sysinfo.assert_called_once_with( [("Test header", "Test value")], - ["Test note"], ["Test footnote"], - indent=" ") + ["Test note"], + ["Test footnote"], + width=100, + indent=" ", + ) def test_format_sysinfo_output_is_printed(self): with mock.patch( - "landscape.sysinfo.deployment.format_sysinfo", - return_value="Hello there!") as format_sysinfo: + "landscape.sysinfo.deployment.format_sysinfo", + return_value="Hello there!", + ) as 
format_sysinfo: run(["--sysinfo-plugins", "TestPlugin"]) self.assertTrue(format_sysinfo.called) @@ -148,6 +162,7 @@ def wrapped_sysinfo_run(*args, **kwargs): original_sysinfo_run(*args, **kwargs) return deferred + sysinfo.run = mock.Mock(side_effect=wrapped_sysinfo_run) run(["--sysinfo-plugins", "TestPlugin"], sysinfo=sysinfo) @@ -193,7 +208,8 @@ self.assertEqual( self.stdout.getvalue(), - " Test header: Test value\n\n => Test note\n\n Test footnote\n") + " Test header: Test value\n\n => Test note\n\n Test footnote\n", + ) return d def test_stop_scheduled_in_callback(self): @@ -216,8 +232,11 @@ reactor = FakeReactor() sysinfo = SysInfoPluginRegistry() sysinfo.run = lambda: 1 / 0 - d = run(["--sysinfo-plugins", "TestPlugin"], reactor=reactor, - sysinfo=sysinfo) + d = run( + ["--sysinfo-plugins", "TestPlugin"], + reactor=reactor, + sysinfo=sysinfo, + ) for x in reactor.queued_calls: x() @@ -230,8 +249,10 @@ If landscape-sysinfo is running as a non-privileged user the log directory is stored in their home directory. """ - self.assertEqual(get_landscape_log_directory(), - os.path.expanduser("~/.landscape")) + self.assertEqual( + get_landscape_log_directory(), + os.path.expanduser("~/.landscape"), + ) def test_get_landscape_log_directory_privileged(self): """ @@ -240,7 +261,9 @@ """ with mock.patch("os.getuid", return_value=0) as uid_mock: self.assertEqual( - get_landscape_log_directory(), "/var/log/landscape") + get_landscape_log_directory(), + "/var/log/landscape", + ) uid_mock.assert_called_once_with() def test_wb_logging_setup(self): @@ -261,11 +284,21 @@ self.assertFalse(logger.propagate) def test_setup_logging_logs_to_var_log_if_run_as_root(self): - with mock.patch.object(os, "getuid", return_value=0) as mock_getuid, \ - mock.patch.object( - os.path, "isdir", return_value=False) as mock_isdir, \ - mock.patch.object(os, "mkdir") as mock_mkdir, \ - mock.patch("logging.open", create=True) as mock_open: + with mock.patch.object( + os, + "getuid", + return_value=0, + ) as mock_getuid, mock.patch.object( + os.path, + "isdir", + return_value=False, + ) as mock_isdir, mock.patch.object( + os, + "mkdir", + ) as mock_mkdir, mock.patch( + "logging.open", + create=True, + ) as mock_open: logger = getLogger("landscape-sysinfo") self.assertEqual(logger.handlers, []) @@ -276,12 +309,14 @@ mock_mkdir.assert_called_with("/var/log/landscape") self.assertEqual( mock_open.call_args_list[0][0], - ("/var/log/landscape/sysinfo.log", "a") + ("/var/log/landscape/sysinfo.log", "a"), ) handler = logger.handlers[0] self.assertTrue(isinstance(handler, RotatingFileHandler)) - self.assertEqual(handler.baseFilename, - "/var/log/landscape/sysinfo.log") + self.assertEqual( + handler.baseFilename, + "/var/log/landscape/sysinfo.log", + ) def test_create_log_dir(self): log_dir = self.makeFile() @@ -291,17 +326,23 @@ def test_run_sets_up_logging(self): with mock.patch( - "landscape.sysinfo.deployment" - ".setup_logging") as setup_logging_mock: + "landscape.sysinfo.deployment.setup_logging", + ) as setup_logging_mock: run(["--sysinfo-plugins", "TestPlugin"]) setup_logging_mock.assert_called_once_with() def test_run_setup_logging_exits_gracefully(self): io_error = IOError("Read-only filesystem.") with mock.patch( - "landscape.sysinfo.deployment.setup_logging", - side_effect=io_error): + "landscape.sysinfo.deployment.setup_logging", + side_effect=io_error, + ): error = self.assertRaises( - SystemExit, run, ["--sysinfo-plugins", "TestPlugin"]) + SystemExit, + run, + ["--sysinfo-plugins", "TestPlugin"], + ) self.assertEqual( - 
error.code, "Unable to setup logging. Read-only filesystem.") + error.code, + "Unable to setup logging. Read-only filesystem.", + ) diff -Nru landscape-client-23.02/landscape/sysinfo/tests/test_disk.py landscape-client-23.08/landscape/sysinfo/tests/test_disk.py --- landscape-client-23.02/landscape/sysinfo/tests/test_disk.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/tests/test_disk.py 2023-08-17 21:19:32.000000000 +0000 @@ -4,30 +4,40 @@ from twisted.internet.defer import Deferred from landscape.lib.testing import FSTestCase -from landscape.sysinfo.disk import Disk, format_megabytes +from landscape.sysinfo.disk import Disk +from landscape.sysinfo.disk import format_megabytes from landscape.sysinfo.sysinfo import SysInfoPluginRegistry class DiskTest(FSTestCase, unittest.TestCase): - def setUp(self): - super(DiskTest, self).setUp() + super().setUp() self.mount_file = self.makeFile("") self.stat_results = {} - self.disk = Disk(mounts_file=self.mount_file, - statvfs=self.stat_results.get) + self.disk = Disk( + mounts_file=self.mount_file, + statvfs=self.stat_results.get, + ) self.sysinfo = SysInfoPluginRegistry() self.sysinfo.add(self.disk) - def add_mount(self, point, block_size=4096, capacity=1000, unused=1000, - fs="ext3", device=None): + def add_mount( + self, + point, + block_size=4096, + capacity=1000, + unused=1000, + fs="ext3", + device=None, + ): if device is None: device = "/dev/" + point.replace("/", "_") self.stat_results[point] = os.statvfs_result( - (block_size, 0, capacity, unused, 0, 0, 0, 0, 0, 0)) + (block_size, 0, capacity, unused, 0, 0, 0, 0, 0, 0), + ) f = open(self.mount_file, "a") - f.write("/dev/%s %s %s rw 0 0\n" % (device, point, fs)) + f.write(f"/dev/{device} {point} {fs} rw 0 0\n") f.close() def test_run_returns_succeeded_deferred(self): @@ -66,8 +76,10 @@ self.add_mount("/home", capacity=0, unused=0) self.add_mount("/", capacity=1000, unused=1000) self.disk.run() - self.assertEqual(self.sysinfo.get_headers(), - [("Usage of /", "0.0% of 3MB")]) + self.assertEqual( + self.sysinfo.get_headers(), + [("Usage of /", "0.0% of 3MB")], + ) def test_zero_total_space_for_home_and_root(self): """ @@ -77,8 +89,10 @@ self.add_mount("/home", capacity=0, unused=0) self.add_mount("/", capacity=0, unused=0) self.disk.run() - self.assertEqual(self.sysinfo.get_headers(), - [("Usage of /", "unknown")]) + self.assertEqual( + self.sysinfo.get_headers(), + [("Usage of /", "unknown")], + ) def test_over_85_percent(self): """ @@ -87,12 +101,14 @@ """ self.add_mount("/", capacity=1000000, unused=150000) self.disk.run() - self.assertEqual(self.sysinfo.get_notes(), - ["/ is using 85.0% of 3.81GB"]) + self.assertEqual( + self.sysinfo.get_notes(), + ["/ is using 85.0% of 3.81GB"], + ) def test_under_85_percent(self): - """No note is displayed for a filesystem using less than 85% capacity. 
- """ + """No note is displayed for a filesystem using less + than 85% capacity.""" self.add_mount("/", block_size=1024, capacity=1000000, unused=151000) self.disk.run() self.assertEqual(self.sysinfo.get_notes(), []) @@ -104,13 +120,22 @@ """ self.add_mount("/", block_size=1024, capacity=1000000, unused=150000) self.add_mount( - "/use", block_size=2048, capacity=2000000, unused=200000) + "/use", + block_size=2048, + capacity=2000000, + unused=200000, + ) self.add_mount( - "/emp", block_size=4096, capacity=3000000, unused=460000) - self.disk.run() - self.assertEqual(self.sysinfo.get_notes(), - ["/ is using 85.0% of 976MB", - "/use is using 90.0% of 3.81GB"]) + "/emp", + block_size=4096, + capacity=3000000, + unused=460000, + ) + self.disk.run() + self.assertEqual( + self.sysinfo.get_notes(), + ["/ is using 85.0% of 976MB", "/use is using 90.0% of 3.81GB"], + ) def test_format_megabytes(self): self.assertEqual(format_megabytes(100), "100MB") @@ -127,8 +152,10 @@ self.add_mount("/") self.add_mount("/home", capacity=1024, unused=512) self.disk.run() - self.assertEqual(self.sysinfo.get_headers(), - [("Usage of /home", "50.0% of 4MB")]) + self.assertEqual( + self.sysinfo.get_headers(), + [("Usage of /home", "50.0% of 4MB")], + ) def test_header_shows_actual_filesystem(self): """ @@ -137,8 +164,10 @@ """ self.add_mount("/", capacity=1024, unused=512) self.disk.run() - self.assertEqual(self.sysinfo.get_headers(), - [("Usage of /", "50.0% of 4MB")]) + self.assertEqual( + self.sysinfo.get_headers(), + [("Usage of /", "50.0% of 4MB")], + ) def test_ignore_boring_filesystem_types(self): """ @@ -151,11 +180,19 @@ self.add_mount("/", capacity=1000, unused=1000, fs="ext3") self.add_mount("/media/dvdrom", capacity=1000, unused=0, fs="udf") self.add_mount("/media/cdrom", capacity=1000, unused=0, fs="iso9660") - self.add_mount("/home/radix/.gvfs", capacity=1000, unused=0, - fs="fuse.gvfs-fuse-daemon") + self.add_mount( + "/home/radix/.gvfs", + capacity=1000, + unused=0, + fs="fuse.gvfs-fuse-daemon", + ) self.add_mount("/mnt/livecd", capacity=1000, unused=0, fs="squashfs") - self.add_mount("/home/mg/.Private", capacity=1000, unused=0, - fs="ecryptfs") + self.add_mount( + "/home/mg/.Private", + capacity=1000, + unused=0, + fs="ecryptfs", + ) self.disk.run() self.assertEqual(self.sysinfo.get_notes(), []) @@ -163,24 +200,38 @@ self.add_mount("/", capacity=0, unused=0, fs="ext4") self.add_mount("/", capacity=1000, unused=1, fs="ext3") self.disk.run() - self.assertEqual(self.sysinfo.get_notes(), - ["/ is using 100.0% of 3MB"]) + self.assertEqual( + self.sysinfo.get_notes(), + ["/ is using 100.0% of 3MB"], + ) def test_no_duplicate_devices(self): self.add_mount("/", capacity=1000, unused=1, device="/dev/horgle") - self.add_mount("/dev/.static/dev", capacity=1000, unused=1, - device="/dev/horgle") - self.disk.run() - self.assertEqual(self.sysinfo.get_notes(), - ["/ is using 100.0% of 3MB"]) + self.add_mount( + "/dev/.static/dev", + capacity=1000, + unused=1, + device="/dev/horgle", + ) + self.disk.run() + self.assertEqual( + self.sysinfo.get_notes(), + ["/ is using 100.0% of 3MB"], + ) def test_shorter_mount_point_in_case_of_duplicate_devices(self): - self.add_mount("/dev/.static/dev", capacity=1000, unused=1, - device="/dev/horgle") + self.add_mount( + "/dev/.static/dev", + capacity=1000, + unused=1, + device="/dev/horgle", + ) self.add_mount("/", capacity=1000, unused=1, device="/dev/horgle") self.disk.run() - self.assertEqual(self.sysinfo.get_notes(), - ["/ is using 100.0% of 3MB"]) + self.assertEqual( + 
self.sysinfo.get_notes(), + ["/ is using 100.0% of 3MB"], + ) def test_shorter_not_lexical(self): """ @@ -191,8 +242,10 @@ self.add_mount("/abc", capacity=1000, unused=1, device="/dev/horgle") self.add_mount("/b", capacity=1000, unused=1, device="/dev/horgle") self.disk.run() - self.assertEqual(self.sysinfo.get_notes(), - ["/b is using 100.0% of 3MB"]) + self.assertEqual( + self.sysinfo.get_notes(), + ["/b is using 100.0% of 3MB"], + ) def test_duplicate_device_and_duplicate_mountpoint_horribleness(self): """ @@ -210,11 +263,17 @@ """ self.add_mount("/", capacity=0, unused=0, device="rootfs") self.add_mount("/", capacity=1000, unused=1, device="/dev/horgle") - self.add_mount("/dev/.static/dev", capacity=1000, unused=1, - device="/dev/horgle") - self.disk.run() - self.assertEqual(self.sysinfo.get_notes(), - ["/ is using 100.0% of 3MB"]) + self.add_mount( + "/dev/.static/dev", + capacity=1000, + unused=1, + device="/dev/horgle", + ) + self.disk.run() + self.assertEqual( + self.sysinfo.get_notes(), + ["/ is using 100.0% of 3MB"], + ) def test_ignore_filesystems(self): """ @@ -233,8 +292,10 @@ self.add_mount("/", capacity=1000, unused=1000, fs="nfs") self.disk.run() self.assertEqual(self.sysinfo.get_notes(), []) - self.assertEqual(self.sysinfo.get_headers(), - [("Usage of /home", "unknown")]) + self.assertEqual( + self.sysinfo.get_headers(), + [("Usage of /home", "unknown")], + ) def test_nfs_as_root_but_not_home(self): """ @@ -245,5 +306,7 @@ self.add_mount("/home", capacity=0, unused=0, fs="ext3") self.disk.run() self.assertEqual(self.sysinfo.get_notes(), []) - self.assertEqual(self.sysinfo.get_headers(), - [("Usage of /home", "unknown")]) + self.assertEqual( + self.sysinfo.get_headers(), + [("Usage of /home", "unknown")], + ) diff -Nru landscape-client-23.02/landscape/sysinfo/tests/test_landscapelink.py landscape-client-23.08/landscape/sysinfo/tests/test_landscapelink.py --- landscape-client-23.02/landscape/sysinfo/tests/test_landscapelink.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/tests/test_landscapelink.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,14 +1,13 @@ import unittest from landscape.lib.testing import TwistedTestCase -from landscape.sysinfo.sysinfo import SysInfoPluginRegistry from landscape.sysinfo.landscapelink import LandscapeLink +from landscape.sysinfo.sysinfo import SysInfoPluginRegistry class LandscapeLinkTest(TwistedTestCase, unittest.TestCase): - def setUp(self): - super(LandscapeLinkTest, self).setUp() + super().setUp() self.landscape_link = LandscapeLink() self.sysinfo = SysInfoPluginRegistry() self.sysinfo.add(self.landscape_link) @@ -20,5 +19,8 @@ self.landscape_link.run() self.assertEqual( self.sysinfo.get_footnotes(), - ["Graph this data and manage this system at:\n" - " https://landscape.canonical.com/"]) + [ + "Graph this data and manage this system at:\n" + " https://landscape.canonical.com/", + ], + ) diff -Nru landscape-client-23.02/landscape/sysinfo/tests/test_load.py landscape-client-23.08/landscape/sysinfo/tests/test_load.py --- landscape-client-23.02/landscape/sysinfo/tests/test_load.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/tests/test_load.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,16 +1,14 @@ import unittest - -import mock +from unittest import mock from landscape.lib.testing import TwistedTestCase -from landscape.sysinfo.sysinfo import SysInfoPluginRegistry from landscape.sysinfo.load import Load +from landscape.sysinfo.sysinfo import SysInfoPluginRegistry class 
LoadTest(TwistedTestCase, unittest.TestCase): - def setUp(self): - super(LoadTest, self).setUp() + super().setUp() self.load = Load() self.sysinfo = SysInfoPluginRegistry() self.sysinfo.add(self.load) @@ -23,5 +21,4 @@ self.load.run() mock_getloadavg.assert_called_once_with() - self.assertEqual(self.sysinfo.get_headers(), - [("System load", "1.5")]) + self.assertEqual(self.sysinfo.get_headers(), [("System load", "1.5")]) diff -Nru landscape-client-23.02/landscape/sysinfo/tests/test_loggedinusers.py landscape-client-23.08/landscape/sysinfo/tests/test_loggedinusers.py --- landscape-client-23.02/landscape/sysinfo/tests/test_loggedinusers.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/tests/test_loggedinusers.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,12 +1,11 @@ -from landscape.sysinfo.sysinfo import SysInfoPluginRegistry -from landscape.sysinfo.loggedinusers import LoggedInUsers from landscape.lib.tests.test_sysstats import FakeWhoQTest +from landscape.sysinfo.loggedinusers import LoggedInUsers +from landscape.sysinfo.sysinfo import SysInfoPluginRegistry class LoggedInUsersTest(FakeWhoQTest): - def setUp(self): - super(LoggedInUsersTest, self).setUp() + super().setUp() self.logged_users = LoggedInUsers() self.sysinfo = SysInfoPluginRegistry() self.sysinfo.add(self.logged_users) @@ -16,8 +15,11 @@ result = self.logged_users.run() def check_headers(result): - self.assertEqual(self.sysinfo.get_headers(), - [("Users logged in", "3")]) + self.assertEqual( + self.sysinfo.get_headers(), + [("Users logged in", "3")], + ) + return result.addCallback(check_headers) def test_order_is_preserved_even_if_asynchronous(self): @@ -27,10 +29,11 @@ self.sysinfo.add_header("After", "2") def check_headers(result): - self.assertEqual(self.sysinfo.get_headers(), - [("Before", "1"), - ("Users logged in", "3"), - ("After", "2")]) + self.assertEqual( + self.sysinfo.get_headers(), + [("Before", "1"), ("Users logged in", "3"), ("After", "2")], + ) + return result.addCallback(check_headers) def test_ignore_errors_on_command(self): @@ -44,4 +47,5 @@ def check_headers(result): self.assertEqual(self.sysinfo.get_headers(), []) + return result.addCallback(check_headers) diff -Nru landscape-client-23.02/landscape/sysinfo/tests/test_memory.py landscape-client-23.08/landscape/sysinfo/tests/test_memory.py --- landscape-client-23.02/landscape/sysinfo/tests/test_memory.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/tests/test_memory.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,8 +1,9 @@ import unittest -from landscape.lib.testing import TwistedTestCase, FSTestCase -from landscape.sysinfo.sysinfo import SysInfoPluginRegistry +from landscape.lib.testing import FSTestCase +from landscape.lib.testing import TwistedTestCase from landscape.sysinfo.memory import Memory +from landscape.sysinfo.sysinfo import SysInfoPluginRegistry MEMINFO_SAMPLE = """ @@ -38,9 +39,8 @@ class MemoryTest(FSTestCase, TwistedTestCase, unittest.TestCase): - def setUp(self): - super(MemoryTest, self).setUp() + super().setUp() self.memory = Memory(self.makeFile(MEMINFO_SAMPLE)) self.sysinfo = SysInfoPluginRegistry() self.sysinfo.add(self.memory) @@ -50,6 +50,7 @@ def test_run_adds_header(self): self.memory.run() - self.assertEqual(self.sysinfo.get_headers(), - [("Memory usage", "27%"), - ("Swap usage", "39%")]) + self.assertEqual( + self.sysinfo.get_headers(), + [("Memory usage", "27%"), ("Swap usage", "39%")], + ) diff -Nru landscape-client-23.02/landscape/sysinfo/tests/test_network.py 
landscape-client-23.08/landscape/sysinfo/tests/test_network.py --- landscape-client-23.02/landscape/sysinfo/tests/test_network.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/tests/test_network.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,16 +1,16 @@ import unittest -from netifaces import AF_INET, AF_INET6 +from netifaces import AF_INET +from netifaces import AF_INET6 from landscape.lib.testing import TwistedTestCase -from landscape.sysinfo.sysinfo import SysInfoPluginRegistry from landscape.sysinfo.network import Network +from landscape.sysinfo.sysinfo import SysInfoPluginRegistry class NetworkTest(TwistedTestCase, unittest.TestCase): - def setUp(self): - super(NetworkTest, self).setUp() + super().setUp() self.result = [] self.network = Network(lambda: self.result) self.sysinfo = SysInfoPluginRegistry() @@ -25,26 +25,34 @@ A header is written to sysinfo output for each network device reported by L{get_active_device_info}. """ - self.result = [{ - "interface": "eth0", - "ip_address": "IGNORED", - "ip_addresses": { - AF_INET: [{"addr": "192.168.0.50"}]}}] + self.result = [ + { + "interface": "eth0", + "ip_address": "IGNORED", + "ip_addresses": {AF_INET: [{"addr": "192.168.0.50"}]}, + }, + ] self.network.run() - self.assertEqual([("IPv4 address for eth0", "192.168.0.50")], - self.sysinfo.get_headers()) + self.assertEqual( + [("IPv4 address for eth0", "192.168.0.50")], + self.sysinfo.get_headers(), + ) def test_run_single_interface_ipv6_address(self): - self.result = [{ - "interface": "eth0", - "ip_address": "IGNORED", - "ip_addresses": { - AF_INET6: [{"addr": "2001::1"}]}}] + self.result = [ + { + "interface": "eth0", + "ip_address": "IGNORED", + "ip_addresses": {AF_INET6: [{"addr": "2001::1"}]}, + }, + ] self.network.run() - self.assertEqual([("IPv6 address for eth0", "2001::1")], - self.sysinfo.get_headers()) + self.assertEqual( + [("IPv6 address for eth0", "2001::1")], + self.sysinfo.get_headers(), + ) def test_run_multiple_interface_multiple_addresses(self): """ @@ -57,35 +65,42 @@ "ip_addresses": { AF_INET: [ {"addr": "192.168.0.50"}, - {"addr": "192.168.1.50"}]}, - }, { + {"addr": "192.168.1.50"}, + ], + }, + }, + { "interface": "eth1", "ip_addresses": { AF_INET: [ {"addr": "192.168.2.50"}, - {"addr": "192.168.3.50"}], - AF_INET6: [ - {"addr": "2001::1"}, - {"addr": "2002::2"}]}, - }, { + {"addr": "192.168.3.50"}, + ], + AF_INET6: [{"addr": "2001::1"}, {"addr": "2002::2"}], + }, + }, + { "interface": "eth2", "ip_addresses": { - AF_INET6: [ - {"addr": "2003::3"}, - {"addr": "2004::4"}]}, - }] + AF_INET6: [{"addr": "2003::3"}, {"addr": "2004::4"}], + }, + }, + ] self.network.run() - self.assertEqual([ - ("IPv4 address for eth0", "192.168.0.50"), - ("IPv4 address for eth0", "192.168.1.50"), - ("IPv4 address for eth1", "192.168.2.50"), - ("IPv4 address for eth1", "192.168.3.50"), - ("IPv6 address for eth1", "2001::1"), - ("IPv6 address for eth1", "2002::2"), - ("IPv6 address for eth2", "2003::3"), - ("IPv6 address for eth2", "2004::4")], - self.sysinfo.get_headers()) + self.assertEqual( + [ + ("IPv4 address for eth0", "192.168.0.50"), + ("IPv4 address for eth0", "192.168.1.50"), + ("IPv4 address for eth1", "192.168.2.50"), + ("IPv4 address for eth1", "192.168.3.50"), + ("IPv6 address for eth1", "2001::1"), + ("IPv6 address for eth1", "2002::2"), + ("IPv6 address for eth2", "2003::3"), + ("IPv6 address for eth2", "2004::4"), + ], + self.sysinfo.get_headers(), + ) def test_run_without_network_devices(self): """ diff -Nru 
landscape-client-23.02/landscape/sysinfo/tests/test_processes.py landscape-client-23.08/landscape/sysinfo/tests/test_processes.py --- landscape-client-23.02/landscape/sysinfo/tests/test_processes.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/tests/test_processes.py 2023-08-17 21:19:32.000000000 +0000 @@ -2,16 +2,16 @@ from twisted.internet.defer import Deferred -from landscape.lib.testing import ( - FSTestCase, TwistedTestCase, ProcessDataBuilder) -from landscape.sysinfo.sysinfo import SysInfoPluginRegistry +from landscape.lib.testing import FSTestCase +from landscape.lib.testing import ProcessDataBuilder +from landscape.lib.testing import TwistedTestCase from landscape.sysinfo.processes import Processes +from landscape.sysinfo.sysinfo import SysInfoPluginRegistry class ProcessesTest(FSTestCase, TwistedTestCase, unittest.TestCase): - def setUp(self): - super(ProcessesTest, self).setUp() + super().setUp() self.fake_proc = self.makeDir() self.processes = Processes(proc_dir=self.fake_proc) self.sysinfo = SysInfoPluginRegistry() @@ -25,17 +25,22 @@ def callback(result): called.append(True) + result.addCallback(callback) self.assertTrue(called) def test_number_of_processes(self): """The number of processes is added as a header.""" for i in range(3): - self.builder.create_data(i, self.builder.RUNNING, uid=0, gid=0, - process_name="foo%d" % (i,)) + self.builder.create_data( + i, + self.builder.RUNNING, + uid=0, + gid=0, + process_name=f"foo{i:d}", + ) self.processes.run() - self.assertEqual(self.sysinfo.get_headers(), - [("Processes", "3")]) + self.assertEqual(self.sysinfo.get_headers(), [("Processes", "3")]) def test_no_zombies(self): self.processes.run() @@ -43,17 +48,33 @@ def test_number_of_zombies(self): """The number of zombies is added as a note.""" - self.builder.create_data(99, self.builder.ZOMBIE, uid=0, gid=0, - process_name="ZOMBERS") + self.builder.create_data( + 99, + self.builder.ZOMBIE, + uid=0, + gid=0, + process_name="ZOMBERS", + stat_data="0 0 Z 0 0 0 0", + ) self.processes.run() - self.assertEqual(self.sysinfo.get_notes(), - ["There is 1 zombie process."]) + self.assertEqual( + self.sysinfo.get_notes(), + ["There is 1 zombie process."], + ) def test_multiple_zombies(self): """Stupid English, and its plurality""" for i in range(2): - self.builder.create_data(i, self.builder.ZOMBIE, uid=0, gid=0, - process_name="ZOMBERS%d" % (i,)) + self.builder.create_data( + i, + self.builder.ZOMBIE, + uid=0, + gid=0, + process_name=f"ZOMBERS{i:d}", + stat_data="0 0 Z 0 0 0 0", + ) self.processes.run() - self.assertEqual(self.sysinfo.get_notes(), - ["There are 2 zombie processes."]) + self.assertEqual( + self.sysinfo.get_notes(), + ["There are 2 zombie processes."], + ) diff -Nru landscape-client-23.02/landscape/sysinfo/tests/test_sysinfo.py landscape-client-23.08/landscape/sysinfo/tests/test_sysinfo.py --- landscape-client-23.02/landscape/sysinfo/tests/test_sysinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/tests/test_sysinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,20 +1,23 @@ -from logging import getLogger, StreamHandler -import mock import os import unittest - -from twisted.internet.defer import Deferred, succeed, fail +from logging import getLogger +from logging import StreamHandler +from unittest import mock + +from twisted.internet.defer import Deferred +from twisted.internet.defer import fail +from twisted.internet.defer import succeed from landscape.lib.compat import StringIO from landscape.lib.plugin 
import PluginRegistry from landscape.lib.testing import HelperTestCase -from landscape.sysinfo.sysinfo import SysInfoPluginRegistry, format_sysinfo +from landscape.sysinfo.sysinfo import format_sysinfo +from landscape.sysinfo.sysinfo import SysInfoPluginRegistry class SysInfoPluginRegistryTest(HelperTestCase): - def setUp(self): - super(SysInfoPluginRegistryTest, self).setUp() + super().setUp() self.sysinfo = SysInfoPluginRegistry() self.sysinfo_logfile = StringIO() self.handler = StreamHandler(self.sysinfo_logfile) @@ -22,7 +25,7 @@ self.logger.addHandler(self.handler) def tearDown(self): - super(SysInfoPluginRegistryTest, self).tearDown() + super().tearDown() self.logger.removeHandler(self.handler) def test_is_plugin_registry(self): @@ -33,7 +36,8 @@ self.sysinfo.add_header("Swap usage", "None") self.assertEqual( self.sysinfo.get_headers(), - [("Memory usage", "65%"), ("Swap usage", "None")]) + [("Memory usage", "65%"), ("Swap usage", "None")], + ) self.assertEqual(self.sysinfo.get_notes(), []) self.assertEqual(self.sysinfo.get_footnotes(), []) @@ -42,31 +46,41 @@ self.sysinfo.add_header("Header2", "Value2") self.sysinfo.add_header("Header3", "Value3") self.sysinfo.add_header("Header2", "Value4") - self.assertEqual(self.sysinfo.get_headers(), - [("Header1", "Value1"), - ("Header2", "Value2"), - ("Header2", "Value4"), - ("Header3", "Value3")]) + self.assertEqual( + self.sysinfo.get_headers(), + [ + ("Header1", "Value1"), + ("Header2", "Value2"), + ("Header2", "Value4"), + ("Header3", "Value3"), + ], + ) def test_add_header_with_none_value(self): self.sysinfo.add_header("Header1", "Value1") self.sysinfo.add_header("Header2", None) self.sysinfo.add_header("Header3", "Value3") - self.assertEqual(self.sysinfo.get_headers(), - [("Header1", "Value1"), - ("Header3", "Value3")]) + self.assertEqual( + self.sysinfo.get_headers(), + [("Header1", "Value1"), ("Header3", "Value3")], + ) self.sysinfo.add_header("Header2", "Value2") - self.assertEqual(self.sysinfo.get_headers(), - [("Header1", "Value1"), - ("Header2", "Value2"), - ("Header3", "Value3")]) + self.assertEqual( + self.sysinfo.get_headers(), + [ + ("Header1", "Value1"), + ("Header2", "Value2"), + ("Header3", "Value3"), + ], + ) def test_add_and_get_notes(self): self.sysinfo.add_note("Your laptop is burning!") self.sysinfo.add_note("Oh, your house too, btw.") self.assertEqual( self.sysinfo.get_notes(), - ["Your laptop is burning!", "Oh, your house too, btw."]) + ["Your laptop is burning!", "Oh, your house too, btw."], + ) self.assertEqual(self.sysinfo.get_headers(), []) self.assertEqual(self.sysinfo.get_footnotes(), []) @@ -75,14 +89,13 @@ self.sysinfo.add_footnote("Go! Go!") self.assertEqual( self.sysinfo.get_footnotes(), - ["Graphs available at http://graph", "Go! Go!"]) + ["Graphs available at http://graph", "Go! Go!"], + ) self.assertEqual(self.sysinfo.get_headers(), []) self.assertEqual(self.sysinfo.get_notes(), []) def test_run(self): - - class Plugin(object): - + class Plugin: def __init__(self, deferred): self._deferred = deferred @@ -115,7 +128,8 @@ plugin_exception_message = ( "There were exceptions while processing one or more plugins. " - "See %s/sysinfo.log for more information.") + "See %s/sysinfo.log for more information." 
+ ) def test_plugins_run_after_synchronous_error(self): """ @@ -125,8 +139,7 @@ self.log_helper.ignore_errors(ZeroDivisionError) plugins_what_run = [] - class BadPlugin(object): - + class BadPlugin: def register(self, registry): pass @@ -134,8 +147,7 @@ plugins_what_run.append(self) 1 / 0 - class GoodPlugin(object): - + class GoodPlugin: def register(self, registry): pass @@ -158,13 +170,13 @@ path = os.path.expanduser("~/.landscape") self.assertEqual( self.sysinfo.get_notes(), - [self.plugin_exception_message % path]) + [self.plugin_exception_message % path], + ) def test_asynchronous_errors_logged(self): self.log_helper.ignore_errors(ZeroDivisionError) - class BadPlugin(object): - + class BadPlugin: def register(self, registry): pass @@ -181,21 +193,20 @@ path = os.path.expanduser("~/.landscape") self.assertEqual( self.sysinfo.get_notes(), - [self.plugin_exception_message % path]) + [self.plugin_exception_message % path], + ) def test_multiple_exceptions_get_one_note(self): self.log_helper.ignore_errors(ZeroDivisionError) - class RegularBadPlugin(object): - + class RegularBadPlugin: def register(self, registry): pass def run(self): 1 / 0 - class AsyncBadPlugin(object): - + class AsyncBadPlugin: def register(self, registry): pass @@ -211,7 +222,8 @@ path = os.path.expanduser("~/.landscape") self.assertEqual( self.sysinfo.get_notes(), - [self.plugin_exception_message % path]) + [self.plugin_exception_message % path], + ) @mock.patch("os.getuid", return_value=0) def test_exception_running_as_privileged_user(self, uid_mock): @@ -221,8 +233,7 @@ directory. """ - class AsyncBadPlugin(object): - + class AsyncBadPlugin: def register(self, registry): pass @@ -239,11 +250,11 @@ path = "/var/log/landscape" self.assertEqual( self.sysinfo.get_notes(), - [self.plugin_exception_message % path]) + [self.plugin_exception_message % path], + ) class FormatTest(unittest.TestCase): - def test_no_headers(self): output = format_sysinfo([]) self.assertEqual(output, "") @@ -253,48 +264,62 @@ self.assertEqual(output, "Header: Value") def test_parallel_headers_with_just_enough_space(self): - output = format_sysinfo([("Header1", "Value1"), - ("Header2", "Value2")], width=34) + output = format_sysinfo( + [("Header1", "Value1"), ("Header2", "Value2")], + width=34, + ) self.assertEqual(output, "Header1: Value1 Header2: Value2") def test_stacked_headers_which_barely_doesnt_fit(self): - output = format_sysinfo([("Header1", "Value1"), - ("Header2", "Value2")], width=33) + output = format_sysinfo( + [("Header1", "Value1"), ("Header2", "Value2")], + width=33, + ) self.assertEqual(output, "Header1: Value1\nHeader2: Value2") def test_stacked_headers_with_clearly_insufficient_space(self): - output = format_sysinfo([("Header1", "Value1"), - ("Header2", "Value2")], width=1) - self.assertEqual(output, - "Header1: Value1\n" - "Header2: Value2") + output = format_sysinfo( + [("Header1", "Value1"), ("Header2", "Value2")], + width=1, + ) + self.assertEqual(output, "Header1: Value1\n" "Header2: Value2") def test_indent_headers_in_parallel_with_just_enough_space(self): - output = format_sysinfo([("Header1", "Value1"), - ("Header2", "Value2")], indent=">>", width=36) + output = format_sysinfo( + [("Header1", "Value1"), ("Header2", "Value2")], + indent=">>", + width=36, + ) self.assertEqual(output, ">>Header1: Value1 Header2: Value2") def test_indent_headers_stacked_which_barely_doesnt_fit(self): - output = format_sysinfo([("Header1", "Value1"), - ("Header2", "Value2")], indent=">>", width=35) - self.assertEqual(output, - ">>Header1: 
Value1\n" - ">>Header2: Value2") + output = format_sysinfo( + [("Header1", "Value1"), ("Header2", "Value2")], + indent=">>", + width=35, + ) + self.assertEqual(output, ">>Header1: Value1\n" ">>Header2: Value2") def test_parallel_and_stacked_headers(self): - headers = [("Header%d" % i, "Value%d" % i) for i in range(1, 6)] + headers = [(f"Header{i:d}", f"Value{i:d}") for i in range(1, 6)] output = format_sysinfo(headers) self.assertEqual( output, "Header1: Value1 Header3: Value3 Header5: Value5\n" - "Header2: Value2 Header4: Value4") + "Header2: Value2 Header4: Value4", + ) def test_value_alignment(self): - output = format_sysinfo([("Header one", "Value one"), - ("Header2", "Value2"), - ("Header3", "Value3"), - ("Header4", "Value4"), - ("Header5", "Value five")], width=45) + output = format_sysinfo( + [ + ("Header one", "Value one"), + ("Header2", "Value2"), + ("Header3", "Value3"), + ("Header4", "Value4"), + ("Header5", "Value five"), + ], + width=45, + ) # These headers and values were crafted to cover several cases: # # - Header padding (Header2 and Header3) @@ -302,105 +327,126 @@ # - Lack of value padding due to a missing last column (Value3) # - Lack of value padding due to being a last column (Value4) # - self.assertEqual(output, - "Header one: Value one Header4: Value4\n" - "Header2: Value2 Header5: Value five\n" - "Header3: Value3") + self.assertEqual( + output, + "Header one: Value one Header4: Value4\n" + "Header2: Value2 Header5: Value five\n" + "Header3: Value3", + ) def test_one_note(self): - self.assertEqual(format_sysinfo(notes=["Something's wrong!"]), - "=> Something's wrong!") + self.assertEqual( + format_sysinfo(notes=["Something's wrong!"]), + "=> Something's wrong!", + ) def test_more_notes(self): - self.assertEqual(format_sysinfo(notes=["Something's wrong", - "You should look at it", - "Really"]), - "=> Something's wrong\n" - "=> You should look at it\n" - "=> Really") + self.assertEqual( + format_sysinfo( + notes=["Something's wrong", "You should look at it", "Really"], + ), + "=> Something's wrong\n=> You should look at it\n=> Really", + ) def test_indented_notes(self): - self.assertEqual(format_sysinfo(notes=["Something's wrong", - "You should look at it", - "Really"], indent=">>"), - ">>=> Something's wrong\n" - ">>=> You should look at it\n" - ">>=> Really") + self.assertEqual( + format_sysinfo( + notes=["Something's wrong", "You should look at it", "Really"], + indent=">>", + ), + ">>=> Something's wrong\n" + ">>=> You should look at it\n" + ">>=> Really", + ) def test_header_and_note(self): - self.assertEqual(format_sysinfo(headers=[("Header", "Value")], - notes=["Note"]), - "Header: Value\n" - "\n" - "=> Note") + self.assertEqual( + format_sysinfo(headers=[("Header", "Value")], notes=["Note"]), + "Header: Value\n" "\n" "=> Note", + ) def test_one_footnote(self): # Pretty dumb. - self.assertEqual(format_sysinfo(footnotes=["Graphs at http://..."]), - "Graphs at http://...") + self.assertEqual( + format_sysinfo(footnotes=["Graphs at http://..."]), + "Graphs at http://...", + ) def test_more_footnotes(self): # Still dumb. - self.assertEqual(format_sysinfo(footnotes=["Graphs at http://...", - "Lunch at ..."]), - "Graphs at http://...\n" - "Lunch at ...") + self.assertEqual( + format_sysinfo(footnotes=["Graphs at http://...", "Lunch at ..."]), + "Graphs at http://...\nLunch at ...", + ) def test_indented_footnotes(self): # Barely more interesting. 
- self.assertEqual(format_sysinfo(footnotes=["Graphs at http://...", - "Lunch at ..."], - indent=">>"), - ">>Graphs at http://...\n" - ">>Lunch at ...") + self.assertEqual( + format_sysinfo( + footnotes=["Graphs at http://...", "Lunch at ..."], + indent=">>", + ), + ">>Graphs at http://...\n>>Lunch at ...", + ) def test_header_and_footnote(self): # Warming up. - self.assertEqual(format_sysinfo(headers=[("Header", "Value")], - footnotes=["Footnote"]), - "Header: Value\n" - "\n" - "Footnote" - ) + self.assertEqual( + format_sysinfo( + headers=[("Header", "Value")], + footnotes=["Footnote"], + ), + "Header: Value\n\nFootnote", + ) def test_header_note_and_footnote(self): # Nice. - self.assertEqual(format_sysinfo(headers=[("Header", "Value")], - notes=["Note"], - footnotes=["Footnote"]), - "Header: Value\n" - "\n" - "=> Note\n" - "\n" - "Footnote" - ) + self.assertEqual( + format_sysinfo( + headers=[("Header", "Value")], + notes=["Note"], + footnotes=["Footnote"], + ), + "Header: Value\n\n=> Note\n\nFootnote", + ) def test_indented_headers_notes_and_footnotes(self): # Hot! - self.assertEqual(format_sysinfo(headers=[("Header1", "Value1"), - ("Header2", "Value2"), - ("Header3", "Value3")], - notes=["Note1", "Note2"], - footnotes=["Footnote1", "Footnote2"], - indent=" ", - width=36), - " Header1: Value1 Header3: Value3\n" - " Header2: Value2\n" - "\n" - " => Note1\n" - " => Note2\n" - "\n" - " Footnote1\n" - " Footnote2" - ) + self.assertEqual( + format_sysinfo( + headers=[ + ("Header1", "Value1"), + ("Header2", "Value2"), + ("Header3", "Value3"), + ], + notes=["Note1", "Note2"], + footnotes=["Footnote1", "Footnote2"], + indent=" ", + width=36, + ), + " Header1: Value1 Header3: Value3\n" + " Header2: Value2\n" + "\n" + " => Note1\n" + " => Note2\n" + "\n" + " Footnote1\n" + " Footnote2", + ) def test_wrap_long_notes(self): self.assertEqual( - format_sysinfo(notes=[ - "I do believe that a very long note, such as one that is " - "longer than about 50 characters, should wrap at the " - "specified width."], width=50, indent="Z"), + format_sysinfo( + notes=[ + "I do believe that a very long note, such as one that is " + "longer than about 50 characters, should wrap at the " + "specified width.", + ], + width=50, + indent="Z", + ), """\ Z=> I do believe that a very long note, such as one that is longer than about 50 characters, - should wrap at the specified width.""") + should wrap at the specified width.""", + ) diff -Nru landscape-client-23.02/landscape/sysinfo/tests/test_temperature.py landscape-client-23.08/landscape/sysinfo/tests/test_temperature.py --- landscape-client-23.02/landscape/sysinfo/tests/test_temperature.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape/sysinfo/tests/test_temperature.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,14 +1,13 @@ import os +from landscape.lib.tests.test_sysstats import ThermalZoneTest from landscape.sysinfo.sysinfo import SysInfoPluginRegistry from landscape.sysinfo.temperature import Temperature -from landscape.lib.tests.test_sysstats import ThermalZoneTest class TemperatureTest(ThermalZoneTest): - def setUp(self): - super(TemperatureTest, self).setUp() + super().setUp() self.temperature = Temperature(self.thermal_zone_path) self.sysinfo = SysInfoPluginRegistry() self.sysinfo.add(self.temperature) @@ -19,13 +18,14 @@ def test_run_adds_header(self): self.write_thermal_zone("THM0", "51000") self.temperature.run() - self.assertEqual(self.sysinfo.get_headers(), - [("Temperature", "51.0 C")]) + self.assertEqual( + 
self.sysinfo.get_headers(), + [("Temperature", "51.0 C")], + ) def test_ignores_bad_files(self): self.write_thermal_zone("THM0", "") - temperature_path = os.path.join(self.thermal_zone_path, - "THM0/temp") + temperature_path = os.path.join(self.thermal_zone_path, "THM0/temp") file = open(temperature_path, "w") file.write("bad-label: 51 C") file.close() @@ -42,5 +42,7 @@ self.write_thermal_zone("THM1", "53000") self.write_thermal_zone("THM2", "52000") self.temperature.run() - self.assertEqual(self.sysinfo.get_headers(), - [("Temperature", "53.0 C")]) + self.assertEqual( + self.sysinfo.get_headers(), + [("Temperature", "53.0 C")], + ) diff -Nru landscape-client-23.02/landscape-client.conf landscape-client-23.08/landscape-client.conf --- landscape-client-23.02/landscape-client.conf 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/landscape-client.conf 2023-08-17 21:19:32.000000000 +0000 @@ -9,4 +9,3 @@ log_level = debug pid_file = /tmp/landscape/landscape-client.pid ping_url = http://localhost:8081/ping - diff -Nru landscape-client-23.02/LICENSE landscape-client-23.08/LICENSE --- landscape-client-23.02/LICENSE 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/LICENSE 2023-08-17 21:19:32.000000000 +0000 @@ -55,7 +55,7 @@ The precise terms and conditions for copying, distribution and modification follow. - + GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION @@ -110,7 +110,7 @@ License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) - + These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in @@ -168,7 +168,7 @@ access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. - + 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is @@ -225,7 +225,7 @@ This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. - + 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License @@ -278,7 +278,7 @@ POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS - + How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest diff -Nru landscape-client-23.02/Makefile landscape-client-23.08/Makefile --- landscape-client-23.02/Makefile 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/Makefile 2023-08-17 21:19:32.000000000 +0000 @@ -2,9 +2,15 @@ TXT2MAN ?= txt2man PYTHON2 ?= python2 PYTHON3 ?= python3 +SNAPCRAFT = SNAPCRAFT_BUILD_INFO=1 snapcraft TRIAL ?= -m twisted.trial TRIAL_ARGS ?= +# PEP8 rules ignored: +# W503 https://www.flake8rules.com/rules/W503.html +# E203 Whitespace before ':' (enforced by Black) +PEP8_IGNORED = W503,E203 + .PHONY: help help: ## Print help about available targets @grep -h -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' @@ -15,11 +21,15 @@ .PHONY: depends2 depends2: - sudo apt-get -y install python-twisted-core python-distutils-extra python-mock python-configobj python-netifaces python-pycurl + sudo apt-get -y install python-twisted-core python-distutils-extra python-mock python-configobj python-netifaces python-pycurl python-pip + pip install pre-commit + pre-commit install .PHONY: depends3 depends3: - sudo apt-get -y install python3-twisted python3-distutils-extra python3-mock python3-configobj python3-netifaces python3-pycurl + sudo apt-get -y install python3-twisted python3-distutils-extra python3-mock python3-configobj python3-netifaces python3-pycurl python3-pip + pip3 install pre-commit + pre-commit install all: build @@ -56,13 +66,15 @@ .PHONY: lint lint: - $(PYTHON3) -m flake8 --ignore E24,E121,E123,E125,E126,E221,E226,E266,E704,E265,W504 \ - `find landscape -name \*.py` + $(PYTHON3) -m flake8 --ignore $(PEP8_IGNORED) `find landscape -name \*.py` .PHONY: pyflakes pyflakes: -pyflakes `find landscape -name \*.py` +pre-commit: + -pre-commit run -a + clean: -find landscape -name __pycache__ -exec rm -rf {} \; -find landscape -name \*.pyc -exec rm -f {} \; @@ -115,6 +127,35 @@ etags: -etags --languages=python -R . +snap-install: + sudo snap install --devmode landscape-client_0.1_amd64.snap +.PHONY: snap-install + +snap-remote-build: + snapcraft remote-build +.PHONY: snap-remote-build + +snap-remove: + sudo snap remove --purge landscape-client +.PHONY: snap-remove + +snap-shell: snap-install + sudo snap run --shell landscape-client.landscape-client +.PHONY: snap-shell + +snap-debug: + $(SNAPCRAFT) -v --debug +.PHONY: snap-debug + +snap-clean: snap-remove + $(SNAPCRAFT) clean + -rm landscape-client_0.1_amd64.snap +.PHONY: snap-clean + +snap: + $(SNAPCRAFT) +.PHONY: snap + include Makefile.packaging .DEFAULT_GOAL := help diff -Nru landscape-client-23.02/man/landscape-client.1 landscape-client-23.08/man/landscape-client.1 --- landscape-client-23.02/man/landscape-client.1 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/man/landscape-client.1 2023-08-17 21:19:32.000000000 +0000 @@ -19,7 +19,7 @@ management software. The client is responsible for communicating system information to the landscape server and executing remote management commands on the system. -.SH OPTIONS +.SH OPTIONS .TP .B \fB--version\fP @@ -31,7 +31,7 @@ .TP .B \fB-c\fP FILE, \fB--config\fP=FILE -Use config from this file (any command line +Use config from this file (any command line \fIoptions\fP override settings from the file). 
(default: \(cq/etc/landscape/client.conf') .TP @@ -64,7 +64,7 @@ .TP .B \fB-k\fP SSL_PUBLIC_KEY, \fB--ssl-public-key\fP=SSL_PUBLIC_KEY -The public SSL key to +The public SSL key to verify the server. Only used if the given server URL is https. .TP @@ -87,7 +87,7 @@ .B \fB--monitor-only\fP Don't enable management features. This is useful -if you want to run the client as a non-root +if you want to run the client as a non-root user. .SH EXAMPLES diff -Nru landscape-client-23.02/man/landscape-client.txt landscape-client-23.08/man/landscape-client.txt --- landscape-client-23.02/man/landscape-client.txt 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/man/landscape-client.txt 2023-08-17 21:19:32.000000000 +0000 @@ -12,10 +12,10 @@ system information to the landscape server and executing remote management commands on the system. -OPTIONS +OPTIONS --version Show program's version number and exit. -h, --help Show this help message and exit. - -c FILE, --config=FILE Use config from this file (any command line + -c FILE, --config=FILE Use config from this file (any command line options override settings from the file). (default: '/etc/landscape/client.conf') -d PATH, --data-path=PATH The directory to store data files in (default: @@ -27,7 +27,7 @@ -u URL, --url=URL The server URL to connect to. --ping-url=PING_URL The URL to perform lightweight exchange initiation with. - -k SSL_PUBLIC_KEY, --ssl-public-key=SSL_PUBLIC_KEY The public SSL key to + -k SSL_PUBLIC_KEY, --ssl-public-key=SSL_PUBLIC_KEY The public SSL key to verify the server. Only used if the given server URL is https. --ignore-sigint Ignore interrupt signals. @@ -35,7 +35,7 @@ --daemon Fork and run in the background. --pid-file=PID_FILE The file to write the PID to. --monitor-only Don't enable management features. This is useful - if you want to run the client as a non-root + if you want to run the client as a non-root user. EXAMPLES diff -Nru landscape-client-23.02/man/landscape-config.1 landscape-client-23.08/man/landscape-config.1 --- landscape-client-23.02/man/landscape-config.1 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/man/landscape-config.1 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,5 @@ .\" Text automatically generated by txt2man -.TH landscape-config 1 "07 February 2022" "" "" +.TH landscape-config 1 "11 March 2023" "" "" .SH NAME \fBlandscape-config \fP- configure the Landscape management client \fB @@ -74,7 +74,7 @@ .TP .B \fB-p\fP KEY, \fB--registration-key\fP=KEY -The account-wide key +The account-wide key used for registering clients. .TP .B @@ -87,7 +87,7 @@ .TP .B \fB-k\fP SSL_PUBLIC_KEY, \fB--ssl-public-key\fP=SSL_PUBLIC_KEY -The SSL CA certificate to +The SSL CA certificate to verify the server with. Only used if the server URL to which we connect is https. .TP @@ -121,6 +121,21 @@ runs (default: 21600). .TP .B +\fB--flush-interval\fP +The number of seconds between flushes to disk for +persistent data. +.TP +.B +\fB--stagger-launch\fP=STAGGER_RATIO +Ratio, between 0 and 1, by which to stagger various +tasks of landscape. +.TP +.B +\fB--manage-sources-list-d\fP +Repository profiles manage the files in +\(cq/etc/apt/sources.list.d'. +.TP +.B \fB--http-proxy\fP=URL The URL of the HTTP proxy, if one is needed. .TP @@ -129,9 +144,8 @@ The URL of the HTTPS proxy, if one is needed. .TP .B -\fB--cloud\fP -Set this if this computer is a cloud instance in -EC2 or UEC. Read below for details. +\fB--access-group\fP +Suggested access group for this computer.
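As a rough illustration of the options added above, a silent registration using the new 23.08 flags might look like the following. Only the flag names come from the man page; the account name, computer title, access group, and interval/ratio values are all placeholders:

```
# Hypothetical values throughout; only the flag names are documented above.
sudo landscape-config --silent \
    --account-name example-account \
    --computer-title "$(hostname)" \
    --access-group example-group \
    --stagger-launch=0.5 \
    --flush-interval=300 \
    --manage-sources-list-d
```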
.TP .B \fB--tags\fP=TAGS @@ -139,11 +153,6 @@ server. .TP .B -\fB--stagger-launch\fP=STAGGER_RATIO -Ratio, between 0 and 1, by which to scatter various -tasks of landscape. -.TP -.B \fB--import\fP=FILENAME_OR_URL Filename or URL to import configuration from. Imported \fIoptions\fP behave as if they were passed in @@ -153,12 +162,12 @@ .B \fB--script-users\fP=USERS A comma-separated list of users to allow scripts to -run. To allow scripts to be run by any user, +run. To allow scripts to be run by any user, enter: ALL. .TP .B \fB--include-manager-plugins\fP=PLUGINS -A comma-separated list of manager +A comma-separated list of manager plugins to load explicitly. .TP .B @@ -179,19 +188,19 @@ Stop running clients and disable start at boot. .TP .B -\fB--otp\fP=OTP -The one-time password (OTP) to use in cloud configuration. +\fB--init\fP +Set up the client directories structure and exit. .TP .B \fB--is-registered\fP -Exit with code 0 (success) if client -is registered else returns 5. Display +Exit with code 0 (success) if client +is registered else returns 5. Display registration info. .SH CLOUD Landscape has some cloud features that become available when the EC2 or UEC machine instance was started using Landscape and the AMI is one of -the official ones provided in the Web user interface. We call these +the official ones provided in the Web user interface. We call these instances "Landscape enabled", because they contain a pre-configured landscape-client installed in them which will register the running instance automatically with Landscape as soon as it starts up. @@ -201,8 +210,7 @@ .IP \(bu 3 make sure the cloud is created in Landscape .IP \(bu 3 -either add "CLOUD=1" to /etc/default/landscape-client or use the -\fB--cloud\fP switch in the \fBlandscape-config\fP(1) command line +add "CLOUD=1" to /etc/default/landscape-client .IP \(bu 3 make sure the client is configured to start at boot (i.e., the /etc/default/landscape-client has the line "RUN=1") diff -Nru landscape-client-23.02/man/landscape-config.txt landscape-client-23.08/man/landscape-config.txt --- landscape-client-23.02/man/landscape-config.txt 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/man/landscape-config.txt 2023-08-17 21:19:32.000000000 +0000 @@ -34,11 +34,11 @@ --ignore-sigint Ignore interrupt signals. --ignore-sigusr1 Ignore SIGUSR1 signal to rotate logs. -a NAME, --account-name=NAME The account this computer belongs to. - -p KEY, --registration-key=KEY The account-wide key + -p KEY, --registration-key=KEY The account-wide key used for registering clients. -t TITLE, --computer-title=TITLE The title of this computer. -u URL, --url=URL The server URL to connect to (default: 'https://landscape.canonical.com/message-system'). - -k SSL_PUBLIC_KEY, --ssl-public-key=SSL_PUBLIC_KEY The SSL CA certificate to + -k SSL_PUBLIC_KEY, --ssl-public-key=SSL_PUBLIC_KEY The SSL CA certificate to verify the server with. Only used if the server URL to which we connect is https. --exchange-interval=INTERVAL The number of seconds between server @@ -50,40 +50,45 @@ with (default: 'http://landscape.canonical.com/ping'). --package-monitor-interval=PACKAGE_MONITOR_INTERVAL The interval between package monitor runs (default: 1800). + --snap-monitor-interval=SNAP_MONITOR_INTERVAL The interval between snap + monitor runs (default: 1800). --apt-update-interval=APT_UPDATE_INTERVAL The interval between apt update runs (default: 21600). + --flush-interval The number of seconds between flushes to disk for + persistent data.
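A minimal sketch of scripting against the --is-registered exit codes documented in the landscape-config(1) hunk above (0 when registered, 5 when not):

```
# Branch on the documented exit codes of landscape-config --is-registered.
if landscape-config --is-registered >/dev/null 2>&1; then
    echo "client is registered with Landscape"
else
    echo "client is not registered"   # documented exit code: 5
fi
```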
+ --stagger-launch=STAGGER_RATIO Ratio, between 0 and 1, by which to stagger various + tasks of landscape. + --manage-sources-list-d Repository profiles manage the files in + '/etc/apt/sources.list.d'. --http-proxy=URL The URL of the HTTP proxy, if one is needed. --https-proxy=URL The URL of the HTTPS proxy, if one is needed. - --cloud Set this if this computer is a cloud instance in - EC2 or UEC. Read below for details. + --access-group Suggested access group for this computer. --tags=TAGS Comma separated list of tag names to be sent to the server. - --stagger-launch=STAGGER_RATIO Ratio, between 0 and 1, by which to scatter various - tasks of landscape. --import=FILENAME_OR_URL Filename or URL to import configuration from. Imported options behave as if they were passed in the command line, with precedence being given to real command line options. --script-users=USERS A comma-separated list of users to allow scripts to - run. To allow scripts to be run by any user, + run. To allow scripts to be run by any user, enter: ALL. - --include-manager-plugins=PLUGINS A comma-separated list of manager + --include-manager-plugins=PLUGINS A comma-separated list of manager plugins to load explicitly. -n, --no-start Don't start the client automatically. --ok-no-register Return exit code 0 instead of 2 if the client can't be registered. --silent Run without manual interaction. --disable Stop running clients and disable start at boot. - --otp=OTP The one-time password (OTP) to use in cloud configuration. - --is-registered Exit with code 0 (success) if client - is registered else returns 5. Display + --init Set up the client directory structure and exit. + --is-registered Exit with code 0 (success) if the client + is registered, else exit with code 5. Display registration info. CLOUD Landscape has some cloud features that become available when the EC2 or UEC machine instance was started using Landscape and the AMI is one of -the official ones provided in the Web user interface. We call these +the official ones provided in the Web user interface. We call these instances "Landscape enabled", because they contain a pre-configured landscape-client installed in them which will register the running instance automatically with Landscape as soon as it starts up. @@ -91,8 +96,7 @@ You can deploy your own AMI, but if you wish the instance to become "Landscape managed" you need to take a few steps: * make sure the cloud is created in Landscape -* either add "CLOUD=1" to /etc/default/landscape-client or use the ---cloud switch in the landscape-config(1) command line +* add "CLOUD=1" to /etc/default/landscape-client * make sure the client is configured to start at boot (i.e., the /etc/default/landscape-client has the line "RUN=1") diff -Nru landscape-client-23.02/man/landscape-sysinfo.txt landscape-client-23.08/man/landscape-sysinfo.txt --- landscape-client-23.02/man/landscape-sysinfo.txt 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/man/landscape-sysinfo.txt 2023-08-17 21:19:32.000000000 +0000 @@ -24,14 +24,14 @@ OPTIONS --version show program's version number and exit -h, --help show this help message and exit - -c FILE, --config=FILE + -c FILE, --config=FILE Use config from this file (any command line options override settings from the file) (default: '/etc/landscape/client.conf'). - -d PATH, --data-path=PATH + -d PATH, --data-path=PATH The directory to store data files in (default: '/var/lib/landscape/client/').
- --sysinfo-plugins=PLUGIN_LIST + --sysinfo-plugins=PLUGIN_LIST Comma-delimited list of sysinfo plugins to use. - --exclude-sysinfo-plugins=PLUGIN_LIST + --exclude-sysinfo-plugins=PLUGIN_LIST Comma-delimited list of sysinfo plugins to NOT use. This always takes precedence over plugins to include. @@ -79,10 +79,10 @@ => There is 1 zombie process. FILES -/etc/landscape/client.conf +/etc/landscape/client.conf Configuration file -/var/log/landscape/sysinfo.log +/var/log/landscape/sysinfo.log Log file for when the tool is run as root. This file will usually be empty, unless something went wrong. In that case, it will have more information about the problem. @@ -91,4 +91,3 @@ SEE ALSO landscape-client(1) update-motd(5) - diff -Nru landscape-client-23.02/pqm-tests.sh landscape-client-23.08/pqm-tests.sh --- landscape-client-23.02/pqm-tests.sh 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/pqm-tests.sh 2023-08-17 21:19:32.000000000 +0000 @@ -54,4 +54,3 @@ then exit 1 fi - diff -Nru landscape-client-23.02/.pre-commit-config.yaml landscape-client-23.08/.pre-commit-config.yaml --- landscape-client-23.02/.pre-commit-config.yaml 1970-01-01 00:00:00.000000000 +0000 +++ landscape-client-23.08/.pre-commit-config.yaml 2023-08-17 21:19:32.000000000 +0000 @@ -0,0 +1,43 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files + - id: debug-statements +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v2.3.0 + hooks: + - id: flake8 + args: + - "--max-line-length=79" + - "--select=B,C,E,F,W,T4,B9" + - "--ignore=E203,W503" +- repo: https://github.com/psf/black + rev: 22.12.0 + hooks: + - id: black + args: + - --line-length=79 + - --include='\.pyi?$' +- repo: https://github.com/asottile/reorder_python_imports + rev: v2.3.0 + hooks: + - id: reorder-python-imports + args: [--py3-plus] +- repo: https://github.com/asottile/add-trailing-comma + rev: v2.0.1 + hooks: + - id: add-trailing-comma + args: [--py36-plus] +exclude: > + (?x)( + \.git + | \.csv$ + | \.__pycache__ + | \.log$ + ) diff -Nru landscape-client-23.02/pyproject.toml landscape-client-23.08/pyproject.toml --- landscape-client-23.02/pyproject.toml 1970-01-01 00:00:00.000000000 +0000 +++ landscape-client-23.08/pyproject.toml 2023-08-17 21:19:32.000000000 +0000 @@ -0,0 +1,13 @@ +[tool.black] +line-length = 79 +include = '\.pyi?$' +exclude = ''' +/( + \.git + | \.tox + | \.venv + | _build + | build + | dist +)/ +''' diff -Nru landscape-client-23.02/README landscape-client-23.08/README --- landscape-client-23.02/README 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/README 2023-08-17 21:19:32.000000000 +0000 @@ -5,7 +5,7 @@ Add our beta PPA to get the latest updates to the landscape-client package -#### Add repo to an Ubuntu series +#### Add repo to an Ubuntu series ``` sudo add-apt-repository ppa:landscape/self-hosted-beta ``` @@ -50,7 +50,7 @@ ## Running -Now you can complete the configuration of your client and register with the +Now you can complete the configuration of your client and register with the Landscape service. There are two ways to do this: 1.
`sudo landscape-config` and answer interactive prompts to finalize your configuration @@ -86,3 +86,104 @@ make check3 make lint ``` + +### Building the Landscape Client snap + +First, you need to ensure that you have the appropriate tools installed: +``` +$ sudo snap install snapcraft --classic +$ lxd init --auto +``` + +There are various make targets defined to assist in the lifecycle of +building and testing the snap. To simply build the snap with the minimum +of debug information displayed: +``` +$ make snap +``` + +If you would prefer to see more information displayed showing the progress +of the build, and would like to get dropped into a debug shell within the +snap container in the event of an error: +``` +$ make snap-debug +``` + +To install the resulting snap: +``` +$ make snap-install +``` + +To remove a previously installed snap: +``` +$ make snap-remove +``` + +To clean the intermediate files as well as the snap itself from the local +build environment: +``` +$ make snap-clean +``` + +To enter into a shell environment within the snap container: +``` +$ make snap-shell +``` + +If you wish to upload the snap to the store, you will first need to get +credentials with the store that allow you to log in locally and publish +binaries. This can be done at: + +https://snapcraft.io/docs/creating-your-developer-account + +After obtaining and confirming your store credentials, you then need to +log in using the snapcraft tool: +``` +$ snapcraft login +``` + +Since snapcraft version 7.x, the credentials are stored in the +gnome keyring on your local workstation. If you are building in an +environment without a GUI (e.g. in a multipass or lxc container), you +will need to install the gnome keyring tools: +``` +$ sudo apt install gnome-keyring +``` + +You will then need to initialize the default keyring as follows: +``` +$ dbus-run-session -- bash +$ gnome-keyring-daemon --unlock +``` +The gnome-keyring-daemon will wait, without displaying a prompt, for you +to type in the initial unlock password (typically the same password for the account +you are using - if you are using the default multipass or lxc "ubuntu" +login, use sudo passwd ubuntu to set it to a known value before doing +the above step). + +Type the login password and hit Enter followed by Ctrl+D to end +the input. + +At this point, you should be able to log into snapcraft: +``` +$ snapcraft login +``` +You will be prompted for your UbuntuOne email address, password and, +if set up this way, your second factor for authentication. If you +are successful, you should be able to query your login credentials +with: +``` +$ snapcraft whoami +``` +A common mistake that first-time users of this process might make +is that after running the gnome-keyring-daemon command, they will +exit the dbus session shell. Do NOT do that. Do all subsequent +work in that bash shell that dbus set up for you because it will +have access to your gnome-keyring. + +If you need to leave the environment and get back in, keep in mind +that you do not have to be logged into the store UNLESS you are +uploading the snap or otherwise manipulating things in your store +account. You will need to repeat the dbus-run-session and +gnome-keyring-daemon steps BEFORE logging in if you do need to be +logged into the store.
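The keyring setup above reduces to a short command sequence once you are inside the container. The following is a minimal sketch, assuming the default multipass/lxc "ubuntu" account and a snapcraft 7.x or later install; every command is taken from the walkthrough above:
```
# Give the account a known password first: the keyring unlock password
# is the login password of the account you are building under.
sudo passwd ubuntu

# Install the keyring tools, then start a D-Bus session shell and stay
# in that shell for all subsequent work.
sudo apt install gnome-keyring
dbus-run-session -- bash

# Unlock the default keyring: type the account password, press Enter,
# then Ctrl+D to end the input. No prompt is displayed.
gnome-keyring-daemon --unlock

# With the keyring unlocked, the store login can persist credentials.
snapcraft login
snapcraft whoami
```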
diff -Nru landscape-client-23.02/README.md landscape-client-23.08/README.md --- landscape-client-23.02/README.md 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/README.md 2023-08-17 21:19:32.000000000 +0000 @@ -5,7 +5,7 @@ Add our beta PPA to get the latest updates to the landscape-client package -#### Add repo to an Ubuntu series +#### Add repo to an Ubuntu series ``` sudo add-apt-repository ppa:landscape/self-hosted-beta ``` @@ -50,7 +50,7 @@ ## Running -Now you can complete the configuration of your client and register with the +Now you can complete the configuration of your client and register with the Landscape service. There are two ways to do this: 1. `sudo landscape-config` and answer interactive prompts to finalize your configuration @@ -86,3 +86,104 @@ make check3 make lint ``` + +### Building the Landscape Client snap + +First, you need to ensure that you have the appropriate tools installed: +``` +$ sudo snap install snapcraft --classic +$ lxd init --auto +``` + +There are various make targets defined to assist in the lifecycle of +building and testing the snap. To simply build the snap with the minimum +of debug information displayed: +``` +$ make snap +``` + +If you would prefer to see more information displayed showing the progress +of the build, and would like to get dropped into a debug shell within the +snap container in the event of an error: +``` +$ make snap-debug +``` + +To install the resulting snap: +``` +$ make snap-install +``` + +To remove a previously installed snap: +``` +$ make snap-remove +``` + +To clean the intermediate files as well as the snap itself from the local +build environment: +``` +$ make snap-clean +``` + +To enter into a shell environment within the snap container: +``` +$ make snap-shell +``` + +If you wish to upload the snap to the store, you will first need to get +credentials with the store that allow you to log in locally and publish +binaries. This can be done at: + +https://snapcraft.io/docs/creating-your-developer-account + +After obtaining and confirming your store credentials, you then need to +log in using the snapcraft tool: +``` +$ snapcraft login +``` + +Since snapcraft version 7.x, the credentials are stored in the +gnome keyring on your local workstation. If you are building in an +environment without a GUI (e.g. in a multipass or lxc container), you +will need to install the gnome keyring tools: +``` +$ sudo apt install gnome-keyring +``` + +You will then need to initialize the default keyring as follows: +``` +$ dbus-run-session -- bash +$ gnome-keyring-daemon --unlock +``` +The gnome-keyring-daemon will wait, without displaying a prompt, for you +to type in the initial unlock password (typically the same password for the account +you are using - if you are using the default multipass or lxc "ubuntu" +login, use sudo passwd ubuntu to set it to a known value before doing +the above step). + +Type the login password and hit Enter followed by Ctrl+D to end +the input. + +At this point, you should be able to log into snapcraft: +``` +$ snapcraft login +``` +You will be prompted for your UbuntuOne email address, password and, +if set up this way, your second factor for authentication. If you +are successful, you should be able to query your login credentials +with: +``` +$ snapcraft whoami +``` +A common mistake that first-time users of this process might make +is that after running the gnome-keyring-daemon command, they will +exit the dbus session shell. Do NOT do that.
Do all subsequent +work in that bash shell that dbus set up for you because it will +have access to your gnome-keyring. + +If you need to leave the environment and get back in, keep in mind +that you do not have to be logged into the store UNLESS you are +uploading the snap or otherwise manipulating things in your store +account. You will need to repeat the dbus-run-session and +gnome-keyring-daemon steps BEFORE logging in if you do need to be +logged into the store. diff -Nru landscape-client-23.02/scripts/landscape-broker landscape-client-23.08/scripts/landscape-broker --- landscape-client-23.02/scripts/landscape-broker 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/scripts/landscape-broker 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,7 @@ #!/usr/bin/python3 -import sys, os +import os +import sys + if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") diff -Nru landscape-client-23.02/scripts/landscape-client landscape-client-23.08/scripts/landscape-client --- landscape-client-23.02/scripts/landscape-client 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/scripts/landscape-client 2023-08-17 21:19:32.000000000 +0000 @@ -1,9 +1,12 @@ #!/usr/bin/python3 -import sys, os +import os +import sys + if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") else: from landscape.lib.warning import hide_warnings + hide_warnings() from landscape.lib.fd import clean_fds @@ -12,7 +15,7 @@ # create a critical FD. Even before the reactor is installed! clean_fds() -from landscape.client.watchdog import run +from landscape.client.watchdog import run # noqa: E402 if __name__ == "__main__": sys.exit(run()) diff -Nru landscape-client-23.02/scripts/landscape-config landscape-client-23.08/scripts/landscape-config --- landscape-client-23.02/scripts/landscape-config 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/scripts/landscape-config 2023-08-17 21:19:32.000000000 +0000 @@ -1,9 +1,12 @@ #!/usr/bin/python3 -import sys, os +import os +import sys + if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") else: from landscape.lib.warning import hide_warnings + hide_warnings() from landscape.client.configuration import main diff -Nru landscape-client-23.02/scripts/landscape-manager landscape-client-23.08/scripts/landscape-manager --- landscape-client-23.02/scripts/landscape-manager 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/scripts/landscape-manager 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,7 @@ #!/usr/bin/python3 -import sys, os +import os +import sys + if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") diff -Nru landscape-client-23.02/scripts/landscape-monitor landscape-client-23.08/scripts/landscape-monitor --- landscape-client-23.02/scripts/landscape-monitor 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/scripts/landscape-monitor 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,7 @@ #!/usr/bin/python3 -import sys, os +import os +import sys + if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") diff -Nru landscape-client-23.02/scripts/landscape-package-changer landscape-client-23.08/scripts/landscape-package-changer --- landscape-client-23.02/scripts/landscape-package-changer 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/scripts/landscape-package-changer 2023-08-17 
21:19:32.000000000 +0000 @@ -1,5 +1,7 @@ #!/usr/bin/python3 -import sys, os +import os +import sys + if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") diff -Nru landscape-client-23.02/scripts/landscape-package-reporter landscape-client-23.08/scripts/landscape-package-reporter --- landscape-client-23.02/scripts/landscape-package-reporter 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/scripts/landscape-package-reporter 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,7 @@ #!/usr/bin/python3 -import sys, os +import os +import sys + if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") diff -Nru landscape-client-23.02/scripts/landscape-release-upgrader landscape-client-23.08/scripts/landscape-release-upgrader --- landscape-client-23.02/scripts/landscape-release-upgrader 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/scripts/landscape-release-upgrader 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,7 @@ #!/usr/bin/python3 -import sys, os +import os +import sys + if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") diff -Nru landscape-client-23.02/scripts/landscape-sysinfo landscape-client-23.08/scripts/landscape-sysinfo --- landscape-client-23.02/scripts/landscape-sysinfo 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/scripts/landscape-sysinfo 2023-08-17 21:19:32.000000000 +0000 @@ -1,11 +1,15 @@ #!/usr/bin/python3 -import sys, os +import os +import sys try: - if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): + if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath( + "scripts", + ): sys.path.insert(0, "./") else: from landscape.lib.warning import hide_warnings + hide_warnings() from twisted.internet import reactor diff -Nru landscape-client-23.02/setup_client.py landscape-client-23.08/setup_client.py --- landscape-client-23.02/setup_client.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/setup_client.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,4 @@ #!/usr/bin/python - import sys from landscape import UPSTREAM_VERSION @@ -8,26 +7,27 @@ NAME = "Landscape Client" DESCRIPTION = "Landscape Client" PACKAGES = [ - "landscape.client", - "landscape.client.broker", - "landscape.client.manager", - "landscape.client.monitor", - "landscape.client.package", - "landscape.client.upgraders", - "landscape.client.user", - ] + "landscape.client", + "landscape.client.broker", + "landscape.client.manager", + "landscape.client.monitor", + "landscape.client.package", + "landscape.client.snap", + "landscape.client.upgraders", + "landscape.client.user", +] MODULES = [ - "landscape.client.accumulate", - "landscape.client.amp", - "landscape.client.configuration", - "landscape.client.deployment", - "landscape.client.diff", - "landscape.client.patch", - "landscape.client.reactor", - "landscape.client.service", - "landscape.client.sysvconfig", - "landscape.client.watchdog", - ] + "landscape.client.accumulate", + "landscape.client.amp", + "landscape.client.configuration", + "landscape.client.deployment", + "landscape.client.diff", + "landscape.client.patch", + "landscape.client.reactor", + "landscape.client.service", + "landscape.client.sysvconfig", + "landscape.client.watchdog", +] SCRIPTS = [] if sys.version_info[0] > 2: SCRIPTS += [ @@ -39,27 +39,28 @@ "scripts/landscape-package-changer", "scripts/landscape-package-reporter", 
"scripts/landscape-release-upgrader", - ] + ] # Dependencies DEB_REQUIRES = [ - "ca-certificates", - #python3-pycurl - ] + "ca-certificates", + # python3-pycurl +] REQUIRES = [ - "pycurl", - "landscape-lib={}".format(UPSTREAM_VERSION), - "landscape-sysinfo={}".format(UPSTREAM_VERSION), - ] + "pycurl", + f"landscape-lib={UPSTREAM_VERSION}", + f"landscape-sysinfo={UPSTREAM_VERSION}", +] if __name__ == "__main__": from setup import setup_landscape + setup_landscape( name=NAME, description=DESCRIPTION, packages=PACKAGES, modules=MODULES, scripts=SCRIPTS, - ) + ) diff -Nru landscape-client-23.02/setup_lib.py landscape-client-23.08/setup_lib.py --- landscape-client-23.02/setup_lib.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/setup_lib.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,40 +1,41 @@ #!/usr/bin/python -NAME = "landscape-lib", +NAME = ("landscape-lib",) DESCRIPTION = "Common code used by Landscape applications" PACKAGES = [ - "landscape.lib", - "landscape.lib.apt", - "landscape.lib.apt.package", - "landscape.message_schemas", - ] + "landscape.lib", + "landscape.lib.apt", + "landscape.lib.apt.package", + "landscape.message_schemas", +] MODULES = [ - "landscape.__init__", - "landscape.constants", - ] + "landscape.__init__", + "landscape.constants", +] SCRIPTS = [] # Dependencies DEB_REQUIRES = [ - "lsb-base", - "lsb-release", - "lshw", - ] + "lsb-base", + "lsb-release", + "lshw", +] REQUIRES = [ - "twisted", - "configobj", - #apt (from python3-apt) - ] + "twisted", + "configobj", + # apt (from python3-apt) +] if __name__ == "__main__": from setup import setup_landscape + setup_landscape( name=NAME, description=DESCRIPTION, packages=PACKAGES, modules=MODULES, scripts=SCRIPTS, - ) + ) diff -Nru landscape-client-23.02/setup.py landscape-client-23.08/setup.py --- landscape-client-23.02/setup.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/setup.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,46 +1,51 @@ #!/usr/bin/python - from distutils.core import setup - -# from python3-distutils-extra -from DistUtilsExtra.command import build_extra from DistUtilsExtra.auto import clean_build_tree +from DistUtilsExtra.command import build_extra from landscape import UPSTREAM_VERSION SETUP = dict( - name=None, - description=None, - packages=None, - py_modules=None, - scripts=None, - - version=UPSTREAM_VERSION, - author="Landscape Team", - author_email="landscape-team@canonical.com", - url="http://landscape.canonical.com", - cmdclass={"build": build_extra.build_extra, - "clean": clean_build_tree}, - ) - - -def setup_landscape(name, description, packages, modules=None, scripts=None, - **kwargs): + name=None, + description=None, + packages=None, + py_modules=None, + scripts=None, + version=UPSTREAM_VERSION, + author="Landscape Team", + author_email="landscape-team@canonical.com", + url="http://landscape.canonical.com", + cmdclass={"build": build_extra.build_extra, "clean": clean_build_tree}, +) + + +def setup_landscape( + name, + description, + packages, + modules=None, + scripts=None, + **kwargs, +): assert name and description and packages - kwargs = dict(SETUP, - name=name, - description=description, - packages=packages, - py_modules=modules, - scripts=scripts, - **kwargs) + kwargs = dict( + SETUP, + name=name, + description=description, + packages=packages, + py_modules=modules, + scripts=scripts, + **kwargs, + ) kwargs = {k: v for k, v in kwargs.items() if k is not None} setup(**kwargs) # Import these afterward to avoid circular imports. 
-import setup_lib, setup_sysinfo, setup_client +import setup_lib # noqa: E402 +import setup_sysinfo # noqa: E402 +import setup_client # noqa: E402 PACKAGES = [] MODULES = [] @@ -53,7 +58,7 @@ SCRIPTS += sub.SCRIPTS DEB_REQUIRES += sub.DEB_REQUIRES REQUIRES += sub.REQUIRES - + if __name__ == "__main__": setup_landscape( @@ -62,4 +67,4 @@ packages=PACKAGES, modules=MODULES, scripts=SCRIPTS, - ) + ) diff -Nru landscape-client-23.02/setup_sysinfo.py landscape-client-23.08/setup_sysinfo.py --- landscape-client-23.02/setup_sysinfo.py 2023-02-07 18:55:50.000000000 +0000 +++ landscape-client-23.08/setup_sysinfo.py 2023-08-17 21:19:32.000000000 +0000 @@ -1,5 +1,4 @@ #!/usr/bin/python - import sys from landscape import UPSTREAM_VERSION @@ -8,30 +7,30 @@ NAME = "Landscape Sysinfo" DESCRIPTION = "Landscape Client" PACKAGES = [ - "landscape.sysinfo", - ] + "landscape.sysinfo", +] MODULES = [] SCRIPTS = [] if sys.version_info[0] > 2: SCRIPTS += [ "scripts/landscape-sysinfo", - ] + ] # Dependencies -DEB_REQUIRES = [ - ] +DEB_REQUIRES = [] REQUIRES = [ - "landscape-lib={}".format(UPSTREAM_VERSION), - ] + f"landscape-lib={UPSTREAM_VERSION}", +] if __name__ == "__main__": from setup import setup_landscape + setup_landscape( name=NAME, description=DESCRIPTION, packages=PACKAGES, modules=MODULES, scripts=SCRIPTS, - ) + ) Binary files /tmp/tmpw8604eb4/B4Um9A0vE1/landscape-client-23.02/snap/gui/landscape-logo-256.png and /tmp/tmpw8604eb4/ZWZQlGnV9v/landscape-client-23.08/snap/gui/landscape-logo-256.png differ diff -Nru landscape-client-23.02/snap/snapcraft.yaml landscape-client-23.08/snap/snapcraft.yaml --- landscape-client-23.02/snap/snapcraft.yaml 1970-01-01 00:00:00.000000000 +0000 +++ landscape-client-23.08/snap/snapcraft.yaml 2023-08-17 21:19:32.000000000 +0000 @@ -0,0 +1,98 @@ +name: landscape-client +base: core22 +version: '0.1' +icon: snap/gui/landscape-logo-256.png +website: https://ubuntu.com/landscape +summary: Client for the Canonical systems management product Landscape +description: | + This client, when installed, allows a machine to connect to the + Landscape server and be remotely managed by it. Be aware that + this snap is not a replacement for the apt package that is + traditionally used. This snap is specifically designed for + coreXX based systems and as such can only interact with other + snaps, not apt packages. 
+ +grade: devel # must be 'stable' to release into candidate/stable channels +architectures: + - build-on: amd64 + - build-on: arm64 + - build-on: ppc64el + - build-on: s390x +confinement: strict + +layout: + /var/lib/landscape: + bind: $SNAP_DATA/var/lib/landscape + +environment: + LD_LIBRARY_PATH: $LD_LIBRARY_PATH:$SNAP/usr/lib/x86_64-linux-gnu + PYTHONPATH: $SNAP/usr/lib/python3/dist-packages:$SNAP/usr/lib/python3.10/dist-packages + +apps: + landscape-client: + daemon: simple + command: bin/landscape-client + plugs: + - network + landscape-config: + command: bin/landscape-config + plugs: + - network + +parts: + landscape-client: + plugin: python + source: https://github.com/CanonicalLtd/landscape-client.git + python-packages: + - convoy-python + - distutils-extra-python + - sasl + - twisted + build-packages: + - build-essential + - libsasl2-2 + - libsasl2-dev + - libsasl2-modules + - libsasl2-modules-db + - libsasl2-modules-gssapi-mit + - python3-distutils + - python3-flake8 + - python3-configobj + - python3-coverage + - python3-distutils-extra + - python3-mock + - python3-netifaces + - python3-pycurl + - python3-pip + - python3-twisted + - software-properties-common + override-build: | + python3 -m venv build/venv --system-site-packages + ln -sf build/venv/bin bin + bin/pip install -U convoy-python distutils-extra-python twisted pre-commit sasl + make build3 + stage-packages: + - landscape-client + - landscape-common + - language-pack-en + - libsasl2-2 + - libsasl2-dev + - libsasl2-modules + - libsasl2-modules-db + - libsasl2-modules-gssapi-mit + - python3-oops + - python3-pip + - python3-twisted + override-stage: | + craftctl default + # Copy the client entry-point scripts into the snap's bin directory + mkdir -p "${SNAPCRAFT_PRIME}/bin" + cp "${SNAPCRAFT_PART_SRC}/scripts/landscape-broker" "${SNAPCRAFT_PRIME}/bin/" + cp "${SNAPCRAFT_PART_SRC}/scripts/landscape-client" "${SNAPCRAFT_PRIME}/bin/" + cp "${SNAPCRAFT_PART_SRC}/scripts/landscape-config" "${SNAPCRAFT_PRIME}/bin/" + cp "${SNAPCRAFT_PART_SRC}/scripts/landscape-manager" "${SNAPCRAFT_PRIME}/bin/" + cp "${SNAPCRAFT_PART_SRC}/scripts/landscape-monitor" "${SNAPCRAFT_PRIME}/bin/" + cp "${SNAPCRAFT_PART_SRC}/scripts/landscape-package-changer" "${SNAPCRAFT_PRIME}/bin/" + cp "${SNAPCRAFT_PART_SRC}/scripts/landscape-package-reporter" "${SNAPCRAFT_PRIME}/bin/" + cp "${SNAPCRAFT_PART_SRC}/scripts/landscape-release-upgrader" "${SNAPCRAFT_PRIME}/bin/" + cp "${SNAPCRAFT_PART_SRC}/scripts/landscape-sysinfo" "${SNAPCRAFT_PRIME}/bin/"
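For reference, the new options documented in the updated man pages above are ordinary landscape-config switches. A hypothetical non-interactive invocation might look like the following sketch; the account name and numeric values are illustrative only (not defaults from this package), and passing a value to --flush-interval in this form is an assumption based on its "number of seconds" description:
```
# Hypothetical silent registration exercising the new 23.08 options;
# "example-account" and the numbers below are illustrative values.
sudo landscape-config --silent \
    --account-name=example-account \
    --computer-title="$(hostname)" \
    --flush-interval=300 \
    --stagger-launch=0.5 \
    --manage-sources-list-d
```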