diff -Nru maas-1.5.4+bzr2294/buildout.cfg maas-1.7.6+bzr3376/buildout.cfg --- maas-1.5.4+bzr2294/buildout.cfg 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/buildout.cfg 2015-07-10 01:27:14.000000000 +0000 @@ -12,7 +12,6 @@ config-test repl sphinx - txlongpoll extensions = buildout-versions buildout_versions_file = versions.cfg versions = versions @@ -50,6 +49,7 @@ initialization = from os import environ environ.setdefault("MAAS_CONFIG_DIR", "${buildout:directory}/etc/maas") + environ.setdefault("MAAS_ROOT", "${buildout:directory}/run") [database] recipe = z3c.recipe.scripts @@ -72,15 +72,12 @@ djorm-ext-pgarray docutils crochet - iscpy entry-points = - celeryd.region=celery.bin.worker:main maas-region-admin=django.core.management:execute_from_command_line initialization = ${common:initialization} environ.setdefault("DJANGO_SETTINGS_MODULE", "maas.development") scripts = - celeryd.region maas-region-admin extra-paths = ${common:extra-paths} @@ -95,14 +92,20 @@ ${maas:initialization} sys.argv[1:1] = [ "test", "--noinput", "--exclude=provisioningserver", - "--exclude=maastesting", "--exclude=maascli"] + "--exclude=maastesting", "--exclude=maascli", + "--logging-level=INFO", + # Reduce the logging level to INFO here as + # DebuggingLoggerMiddleware logs the content of all the + # requests at DEBUG level: we don't want this in the + # tests as it's too verbose. 
+ ] scripts = test.maas extra-paths = ${maas:extra-paths} [maas-probe-dhcp] recipe = zc.recipe.egg -eggs = +eggs = ${maas:eggs} entry-points = maas-probe-dhcp=provisioningserver.dhcp.probe:main @@ -156,13 +159,11 @@ eggs = crochet entry-points = - celeryd.cluster=celery.bin.worker:main maas-provision=provisioningserver.__main__:main twistd.pserv=twisted.scripts.twistd:run extra-paths = ${common:extra-paths} scripts = - celeryd.cluster maas-provision twistd.pserv initialization = @@ -223,10 +224,3 @@ scripts = ipy entry-points = ipy=IPython.frontend.terminal.ipapp:launch_new_instance - -[txlongpoll] -recipe = z3c.recipe.scripts -eggs = -extra-paths = /buildout/creates/an/invalid/list/literal/without/this -entry-points = twistd.txlongpoll=twisted.scripts.twistd:run -scripts = twistd.txlongpoll diff -Nru maas-1.5.4+bzr2294/CHANGELOG maas-1.7.6+bzr3376/CHANGELOG --- maas-1.5.4+bzr2294/CHANGELOG 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/CHANGELOG 2015-07-10 01:27:14.000000000 +0000 @@ -2,20 +2,768 @@ Changelog ========= -1.5.4 +1.7.6 +===== + +Bug Fix Update +-------------- + +#1470585 Accept list of forwarders for upstream_dns rather than just one. + +#1413388 Fix upgrade issue where it would remove custom DNS config, + potentially breaking DNS. + +1.7.5 +===== + +Bug Fix Update +-------------- + +#1456969 MAAS cli/API: missing option set use-fast-installer / + use-debian-installer + +1.7.4 +===== + +Bug Fix Update +-------------- + +#1456892 500 error: UnboundLocalError: local variable 'key_required' + referenced before assignment +#1387859 When MAAS has too many leases, and lease parsing fails, MAAS fails + to auto-map NIC with network +#1329267 Alert a command-line user of `maas` when their local API + description is out-of-date. 
+ +1.7.3 +===== + +Bug Fix Update +-------------- + +#1441933 Internal Server Error when saving a cluster without Router IP +#1441133 MAAS version not exposed over the API +#1437094 Sorting by mac address on webui causes internal server error +#1439359 Automatically set correct boot resources selection and start import + after upgrade from MAAS 1.5; Ensures MAAS is usable after upgrade. +#1439366 Backwards compatibility with MAAS 1.5 preseeds and custom preseeds. + Ensures that users dont have to manually change preseeds names. + +1.7.2 +===== + +Bug Fix Update +-------------- + +For full details see https://launchpad.net/maas/+milestone/1.7.2 + +#1331214 Support AMT Version > 8 +#1397567 Fix call to amttool when restarting a node to not fail disk erasing. +#1415538 Do not generate the 'option routers' stanza if router IP is None. +#1403909 Do not deallocate StaticIPAddress before node has powered off. +#1405998 Remove all OOPS reporting. +#1423931 Update the nodes host maps when a sticky ip address is claimed over the API. +#1433697 Look for bootloaders in /usr/lib/EXTLINUX + + +1.7.1 +===== + +Minor feature improvements +-------------------------- + +New CentOS Release support. + Further to the work done in the 1.7.0 MAAS Release, MAAS now supports + uploading various versions of CentOS. Previously MAAS would only + officially support 6.5. + +Power Monitoring for Seamicro 15000, Cisco UCS and HP Moonshot Chassis + Further the work done in the 1.7.0 MAAS release, it now supports power + query and monitoring for the Seamicro 15000 Chassis, the Cisco UCS + Chassis Manager and the HP Moonshot Chassis Manager. + +Node Listing Page and Node Event Log live refresh + The Node Listing page and the Node Event Log now have live refresh + every 10 seconds. This allows MAAS to display the latest node status + and events without forcing a browser refresh. + +IP Address Reservation + The static IP address reservation API now has an optional "mac" + parameter. 
Specifying a MAC address here will link the new static IP + to that MAC address. A DHCP host map will be created for the MAC + address. No other IPs may be reserved for that MAC address until the + current one is released. + +Bug fix update +-------------- + +For full details see https://launchpad.net/maas/+milestone/1.7.1 + +#1330765 If start_nodes() fails, it doesn't clean up after itself. +#1373261 pserv.yaml rewrite breaks when previous generator URL uses IPv6 address +#1386432 After update to the latest curtin that changes the log to install.log MAAS show's two installation logs +#1386488 If rndc fails, you get an Internal Server Error page +#1386502 No "failed" transition from "new" +#1386914 twisted Unhandled Error when region can't reach upstream boot resource +#1391139 Tagged VLAN on aliased NIC breaks migration 0099 +#1391161 Failure: twisted.internet.error.ConnectionDone: Connection was closed cleanly. +#1391411 metadata API signal() is releasing host maps at the end of installation +#1391897 Network names with dots cause internal server error when on node pages +#1394382 maas does not know about VM "paused" state +#1396308 Removing managed interface causes maas to delete nodes +#1397356 Disk Wiping fails if installation is not Ubuntu +#1398405 MAAS UI reports storage size in Gibibytes (base 2) but is labeled GB - Gigabytes (base 10). 
+#1399331 MAAS leaking sensitive information in ps ax output +#1400849 Check Power State disappears after upgrade to 1.7 bzr 3312 +#1401241 custom dd-tgz format images looked for in wrong path, so they don't work +#1401983 Exception: deadlock detected +#1403609 can not enlist chassis with maas admin node-group probe-and-enlist-mscm +#1283106 MAAS allows the same subnet to be defined on two managed interfaces of the same cluster +#1303925 commissioning fails silently if a node can't reach the region controller +#1357073 power state changes are not reflected quickly enough in the UI +#1360280 boot-source-selections api allows adding bogus and duplicated values +#1368400 Can't power off nodes that are in Ready state but on +#1370897 The node power monitoring service does not check nodes in parallel +#1376024 gpg --batch [...]` error caused by race in BootSourceCacheService +#1376716 AMT NUC stuck at boot prompt instead of powering down (no ACPI support in syslinux poweroff) +#1378835 Config does not have a unique index on name +#1379370 Consider removing transaction in claim_static_ip_addresses(). 
+#1379556 Panicky log warning that is irrelevant +#1381444 Misleading error message in log "Unknown power_type 'sm15k'" +#1382166 Message disclosing image import necessary visible while not logged in +#1382237 UnicodeEncodeError when unable to create host maps +#1383231 Error message when trying to reserve the same static IP twice is unhelpful +#1383237 Error message trying to reserve an IP address when no static range is defined is misleading +#1384424 Seamicro Machines do not have Power Status Tracking +#1384428 HP Moonshot Chassis Manager lacks power status monitoring +#1384924 need to provide a better upgrade message for images on the cluster but not on the region +#1386517 DHCP leases are not released at the end of commissioning and possibly enlistment +#1387239 MAAS does not provide an API for reserving a static IP for a given MAC address +#1387414 Race when registering new event type +#1388033 Trying to reserve a static IP when no more IPs are available results in 503 Service Unavailable with no error text +#1389602 Inconsistent behavior in the checks to delete a node +#1389733 node listing does not update the status and power of nodes +#1390144 Node 'releasing' should have a timeout +#1391193 API error documentation +#1391421 Names of custom boot-resources not visible in the web UI +#1391891 Spurious test failure: TestDNSForwardZoneConfig_GetGenerateDirectives.test_returns_single_entry_for_tiny_network +#1393423 PowerKVM / VIrsh import should allow you to specify a prefix to filter VM's to import +#1393953 dd-format images fail to deploy +#1400909 Networks are being autocreated like eth0-eth0 instead of maas-eth0 +#1401349 Memory size changes to incorrect size when page is refreshed +#1402237 Node event log queries are slow (over 1 second) +#1402243 Nodes in 'Broken' state are being power queried constantly +#1402736 clicking on zone link from node page - requested URL was not found on this server +#1403043 Wrong top-level tab is selected when viewing a 
node +#1381609 Misleading log message when a node has a MAC address not attached to a cluster interface +#1386909 Misleading Error: Unable to identify boot image for (ubuntu/amd64/generic/trusty/local): cluster 'maas' does not have matching boot image. +#1388373 Fresh image import of 3 archs displaying multiple rows for armhf and amd64 +#1398159 TFTP into MAAS server to get pxelinux.0 causes unhandled error +#1383651 Node.start() and Node.stop() raise MulltipleFailures unnecessarily +#1383668 null" when releasing an IP address is confusing +#1389416 Power querying for UCSM not working +#1399676 UX bug: mac address on the nodes page should be the MAC address it pxe booted from +#1399736 MAAS should display memory sizes in properly labeld base 2 units - MiB, GiB, etc. +#1401643 Documentation has wrong pattern for user provided preseeds +#1401707 Slow web performance (5+ minute response time) on MAAS with many nodes +#1403609 Fix MSCM chassis enlistment. +#1409952 Correctly parse MAC Address for Power8 VM enlistment. +#1409852 Do not fail when trying to perform an IP Address Reservation. +#1413030 OS and Release no longer populate on Add Node page +#1414036 Trying to add an empty network crashes (AddrFormatError) + + +1.7.0 +===== + +Important announcements +----------------------- + +**Re-import your boot images** + You must re-import your boot images, see below for details. + +**Update Curtin preseed files** + Two changes were made to Curtin preseed files that need your attention + if you made any customisations: + + * The OS name must now appear in the filename. 
The new schema is shown + here, each file pattern is tried in turn until a match is found:: + + {prefix}_{osystem}_{node_arch}_{node_subarch}_{release}_{node_name} + {prefix}_{osystem}_{node_arch}_{node_subarch}_{release} + {prefix}_{osystem}_{node_arch}_{node_subarch} + {prefix}_{osystem}_{node_arch} + {prefix}_{osystem} + {prefix} + + * If you are modifying ``/etc/network/interfaces`` in the preseed, it must be + moved so it is processed last in ``late_commands`` since MAAS now writes + to this file itself as part of IPv6 setup. For example:: + + late_commands: + bonding_02: ["curtin", "in-target", "--", "wget", "-O", "/etc/network/interfaces", "http://[...snip...]"] + + must now look like this:: + + late_commands: + zz_write_ifaces: ["curtin", "in-target", "--", "wget", "-O", "/etc/network/interfaces", "http://[...snip...]"] + + The leading ``zz`` ensures the command sorts to the end of the + ``late_commands`` list. + + +Major new features +------------------ + +**Improved image downloading and reporting.** + MAAS boot images are now downloaded centrally by the region controller + and disseminated to all registered cluster controllers. This change includes + a new web UI under the `Images` tab that allows the admin to select + which images to import and shows the progress of the ongoing download. + This completely replaces any file-based configuration that used to take + place on cluster controllers. The cluster page now shows whether it has + synchronised all the images from the region controller. + + This process is also completely controllable using the API. + +.. Note:: + Unfortunately due to a format change in the way images are stored, it + was not possible to migrate previously downloaded images to the new region + storage. The cluster(s) will still be able to use the existing images, + however the region controller will be unaware of them until an import + is initiated. When the import is finished, the cluster(s) will remove + older image resources. 
+ + This means that the first thing to do after upgrading to 1.7 is go to the + `Images` tab and re-import the images. + +**Increased robustness.** + A large amount of effort has been given to ensuring that MAAS remains + robust in the face of adversity. An updated node state model has been + implemented that takes into account more of the situations in which a + node can be found including any failures at each stage. + + When a node is getting deployed, it is now monitored to check that each + stage is reached in a timely fashion; if it does not then it is marked + as failed. + + The core power driver was updated to check the state of the power on each + node and is reported in the web UI and API. The core driver now also + handles retries when changing the power state of hardware, removing the + requirement that each power template handle it individually. + +**RPC security.** + As a step towards mutually verified TLS connections between MAAS's + components, 1.7 introduces a simple shared-secret mechanism to + authenticate the region with the clusters and vice-versa. For those + clusters that run on the same machine as the region controller (which + will account for most people), everything will continue to work + without intervention. However, if you're running a cluster on a + separate machine, you must install the secret: + + 1. After upgrading the region controller, view /var/lib/maas/secret + (it's text) and copy it. + + 2. On each cluster, run: + + sudo -u maas maas-provision install-shared-secret + + You'll be prompted for the secret; paste it in and press enter. It + is a password prompt, so the secret will not be echoed back to you. + + That's it; the upgraded cluster controller will find the secret + without needing to be told. + +**RPC connections.** + Each cluster maintains a persistent connection to each region + controller process that's running. 
The ports on which the region is + listening are all high-numbered, and they are allocated randomly by + the OS. In a future release of MAAS we will narrow this down. For now, + each cluster controller needs unfiltered access to each machine in the + region on all high-numbered TCP ports. + +**Node event log.** + For every major event on nodes, it is now logged in a node-specific log. + This includes events such as power changes, deployments and any failures. + +**IPv6.** + It is now possible to deploy Ubuntu nodes that have IPv6 enabled. + See :doc:`ipv6` for more details. + +**Removal of Celery and RabbitMQ.** + While Celery was found to be very reliable it ultimately did not suit + the project's requirements as it is a largely fire-and-forget mechanism. + Additionally it was another moving part that caused some headaches for + users and admins alike, so the decision was taken to remove it and implement + a custom communications mechanism between the region controller and cluster + controllers. The new mechanism is bidirectional and allowed the complex + interactions to take place that are required as part of the robustness + improvements. + + Since a constant connection is maintained, as a side effect the web UI now + shows whether each cluster is connected or not. + +**Support for other OSes.** + Non-Ubuntu OSes are fully supported now. This includes: + - Windows + - Centos + - SuSE + +**Custom Images.** + MAAS now supports the deployment of Custom Images. Custom images can be + uploaded via the API. The usage of custom images allows the deployment of + other Ubuntu Flavors, such as Ubuntu Desktop. + +**maas-proxy.** + MAAS now uses maas-proxy as the default proxy solution instead of + squid-deb-proxy. On a fresh install, MAAS will use maas-proxy by default. + On upgrades from previous releases, MAAS will install maas-proxy instead of + squid-deb-proxy. 
+ +Minor notable changes +--------------------- + +**Better handling of networks.** + All networks referred to by cluster interfaces are now automatically + registered on the Network page. Any node network interfaces are + automatically linked to the relevant Network. + +.. Note:: + Commissioning currently requires an IP address to be available for each + network interface on a network that MAAS manages; this allows MAAS to + auto-populate its networks database. In general you should use a + well-sized network (/16 recommended if you will be using containers and + VMs) and dynamic pool. If this feature risks causing IP exhaustion for + your deployment and you do not need the auto-populate functionality, you + can disable it by running the following command on your region controller:: + + sudo maas maas set-config name=enable_dhcp_discovery_on_unconfigured_interfaces value=False + +**Improved logging.** + A total overhaul of where logging is produced was undertaken, and now + all the main events in MAAS are selectively reported to syslog with the + "maas" prefix from both the region and cluster controllers alike. If MAAS + is installed using the standard Ubuntu packaging, its syslog entries are + redirected to /var/log/maas/maas.log. + + On the clusters, pserv.log is now less chatty and contains only errors. + On the region controller appservers, maas-django.log contains only appserver + errors. + +**Static IP selection.** + The API was extended so that specific IPs can be pre-allocated for network + interfaces on nodes and for user-allocated IPs. + +**Pronounceable random hostnames.** + The old auto-generated 5-letter names were replaced with a pseudo-random + name that is produced from a dictionary giving names of the form + 'adjective-noun'. 
+ + +Known Problems & Workarounds +---------------------------- + +**Upgrade issues** + There may be upgrade issues for users currently on MAAS 1.5 and 1.6; while we + have attempted to reproduce and address all the issues reported, some bugs + remain inconclusive. We recommend a full, tested backup of the MAAS servers + before attempting the upgrade to 1.7. If you do encounter issues, please file + these and flag them to the attention of the MAAS team and we will address them + in point-releases. See bugs `1381058`_, `1382266`_, `1379890`_, `1379532`_, + and `1379144`_. + +.. _1381058: + https://launchpad.net/bugs/1381058 +.. _1382266: + https://launchpad.net/bugs/1382266 +.. _1379890: + https://launchpad.net/bugs/1379890 +.. _1379532: + https://launchpad.net/bugs/1379532 +.. _1379144: + https://launchpad.net/bugs/1379144 + +**Split Region/Cluster set-ups** + If you site your cluster on a separate host to the region, it needs a + security key to be manually installed by running + ``maas-provision install-shared-secret`` on the cluster host. + +**Private boot streams** + If you had private boot image stream information configured in MAAS 1.5 or + 1.6, upgrading to 1.7 will not take that into account and it will need to be + manually entered on the settings page in the MAAS UI (bug `1379890`_) + +.. _1379890: + https://launchpad.net/bugs/1379890 + +**Concurrency issues** + Concurrency issues expose us to races when simultaneous operations are + triggered. This is the source of many hard to reproduce issues which will + require us to change the default database isolation level. We intend to address + this in the first point release of 1.7. + +**Destroying a Juju environment** + When attempting to "juju destroy" an environment, nodes must be in the DEPLOYED + state; otherwise, the destroy will fail. You should wait for all in-progress + actions on the MAAS cluster to conclude before issuing the command. (bug + `1381619`_) + +.. 
_1381619: + https://launchpad.net/bugs/1381619 + +**AMT power control** + A few AMT-related issues remain, with workarounds: + + * Commissioning NUC reboots instead of shutting down (bug `1368685`_). There + is `a workaround in the power template`_ + + * MAAS (amttool) cannot control AMT version > 8. See `workaround described in + bug 1331214`_ + + * AMT NUC stuck at boot prompt instead of powering down (no ACPI support in + syslinux poweroff) (bug `1376716`_). See the `ACPI-only workaround`_ + +.. _1368685: + https://bugs.launchpad.net/maas/+bug/1368685 +.. _a workaround in the power template: + https://bugs.launchpad.net/maas/+bug/1368685/comments/8 +.. _workaround described in bug 1331214: + https://bugs.launchpad.net/maas/+bug/1331214/comments/18 +.. _1376716: + https://bugs.launchpad.net/maas/+bug/1376716 +.. _ACPI-only workaround: + https://bugs.launchpad.net/maas/+bug/1376716/comments/12 + + +**Disk wiping** + If you enable disk wiping, juju destroy-environment may fail for you. The + current workaround is to wait and re-issue the command. This will be fixed in + future versions of MAAS & Juju. (bug `1386327`_) + +.. _1386327: + https://bugs.launchpad.net/maas/+bug/1386327 + +**BIND with DNSSEC** + If you are using BIND with a forwarder that uses DNSSEC and have not + configured certificates, you will need to explicitly disable that feature in + your BIND configuration (1384334) + +.. _1384334: + https://bugs.launchpad.net/maas/+bug/1384334 + +**Boot source selections on the API** + Use of API to change image selections can leave DB in a bad state + (bug `1376812`_). It can be fixed by issuing direct database updates. + +.. _1376812: + https://bugs.launchpad.net/maas/+bug/1376812 + +**Disabling DNS** + Disabling DNS may not work (bug `1383768`_) + +.. _1383768: + https://bugs.launchpad.net/maas/+bug/1383768 + +**Stale DNS zone files** + Stale DNS zone files may be left behind if the MAAS domainname is changed + (bug `1383329`_) + +.. 
_1383329: + https://bugs.launchpad.net/maas/+bug/1383329 + + + +Major bugs fixed in this release +-------------------------------- + +See https://launchpad.net/maas/+milestone/1.7.0 for full details. + +#1081660 If maas-enlist fails to reach a DNS server, the node will be named ";; connection timed out; no servers could be reached" + +#1087183 MaaS cloud-init configuration specifies 'manage_etc_hosts: localhost' + +#1328351 ConstipationError: When the cluster runs the "import boot images" task it blocks other tasks + +#1342117 CLI command to set up node-group-interface fails with /usr/lib/python2.7/dist-packages/maascli/__main__.py: error: u'name' + +#1349254 Duplicate FQDN can be configured on MAAS via CLI or API + +#1352575 BMC password showing in the apache2 logs + +#1355534 UnknownPowerType traceback in appserver log + +#1363850 Auto-enlistment not reporting power parameters + +#1363900 Dev server errors while trying to write to '/var/lib/maas' + +#1363999 Not assigning static IP addresses + +#1364481 http 500 error doesn't contain a stack trace + +#1364993 500 error when trying to acquire a commissioned node (AddrFormatError: failed to detect a valid IP address from None) + +#1365130 django-admin prints spurious messages to stdout, breaking scripts + +#1365850 DHCP scan using cluster interface name as network interface? + +#1366172 NUC does not boot after power off/power on + +#1366212 Large dhcp leases file leads to tftp timeouts + +#1366652 Leaking temporary directories + +#1368269 internal server error when deleting a node + +#1368590 Power actions are not serialized. + +#1370534 Recurrent update of the power state of nodes crashes if the connection to the BMC fails. 
+ +#1370958 excessive pserv logging + +#1372767 Twisted web client does not support IPv6 address + +#1372944 Twisted web client fails looking up IPv6 address hostname + +#1373031 Cannot register cluster + +#1373103 compose_curtin_network_preseed breaks installation of all other operating systems + +#1373368 Conflicting power actions being dropped on the floor can result in leaving a node in an inconsistent state + +#1373699 Cluster Listing Page lacks feedback about the images each cluster has + +#1374102 No retries for AMT power? + +#1375980 Nodes failed to transition out of "New" state on bulk commission + +#1376023 After performing bulk action on maas nodes, Internal Server Error + +#1376888 Nodes can't be deleted if DHCP management is off. + +#1377099 Bulk operation leaves nodes in inconsistent state + +#1379209 When a node has multiple interfaces on a network MAAS manages, MAAS assigns static IP addresses to all of them + +#1379744 Cluster registration is fragile and insecure + +#1380932 MAAS does not cope with changes of the dhcp daemons + +#1381605 Not all the DNS records are being added when deploying multiple nodes + +#1012954 If a power script fails, there is no UI feedback + +#1186196 "Starting a node" has different meanings in the UI and in the API. 
+ +#1237215 maas and curtin do not indicate failure reasonably + +#1273222 MAAS doesn't check return values of power actions + +#1288502 archive and proxy settings not honoured for commissioning + +#1316919 Checks don't exist to confirm a node will actually boot + +#1321885 IPMI detection and automatic setting fail in ubuntu 14.04 maas + +#1325610 node marked "Ready" before poweroff complete + +#1325638 Add hardware enablement for Universal Management Gateway + +#1340188 unallocated node started manually, causes AssertionError for purpose poweroff + +#1341118 No feedback when IPMI credentials fail + +#1341121 No feedback to user when cluster is not running + +#1341581 power state is not represented in api and ui + +#1341800 MAAS doesn't support soft power off through the API + +#1344177 hostnames can't be changed while a node is acquired + +#1347518 Confusing error message when API key is wrong + +#1349496 Unable to request a specific static IP on the API + +#1349736 MAAS logging is too verbose and not very useful + +#1349917 guess_server_address() can return IPAddress or hostname + +#1350103 No support for armhf/keystone architecture + +#1350856 Can't constrain acquisition of nodes by not having a tag + +#1356880 MAAS shouldn't allow changing the hostname of a deployed node + +#1357714 Virsh power driver does not seem to work at all + +#1358859 Commissioning output xml is hard to understand, would be nice to have yaml as an output option. 
+ +#1359169 MAAS should handle invalid consumers gracefully + +#1359822 Gateway is missing in network definition + +#1363913 Impossible to remove last MAC from network in UI + +#1364228 Help text for node hostname is wrong + +#1364591 MAAS Archive Mirror does not respect non-default port + +#1365616 Non-admin access to cluster controller config + +#1365619 DNS should be an optional field in the network definition + +#1365776 commissioning results view for a node also shows installation results + +#1366812 Old boot resources are not being removed on clusters + +#1367455 MAC address for node's IPMI is reversed looked up to yield IP address using case sensitive comparison + +#1373580 [SRU] Glen m700 cartridge list as ARM64/generic after enlist + +#1373723 Releasing a node without power parameters ends up in not being able to release a node + +#1233158 no way to get power parameters in api + +#1319854 `maas login` tells you you're logged in successfully when you're not + +#1368480 Need API to gather image metadata across all of MAAS + +#1281406 Disk/memory space on Node edit page have no units + +#1299231 MAAS DHCP/DNS can't manage more than a /16 network + +#1357381 maas-region-admin createadmin shows error if not params given + +#1376393 powerkvm boot loader installs even when not needed + +#1287224 MAAS random generated hostnames are not pronounceable + +#1348364 non-maas managed subnets cannot query maas DNS + + +1.6.1 ===== Bug fix update -------------- - - Package fails to install when the default route is through an - aliased/tagged interface (LP: #1350235) - - ERROR Nonce already used (LP: #1190986) - - Add MAAS arm64/xgene support (LP: #1338851) - - Add utopic support (LP: #1337437) - - API documentation for nodegroup op=details missing parameter - (LP: #1331982) +- Auto-link node MACs to Networks (LP: #1341619) + MAAS will now auto-create a Network from a cluster interface, and + if an active lease exists for a node's MAC then it will be linked to + that 
Network. + + +1.6.0 +===== + +Special notice: + Cluster interfaces now have static IP ranges in order to give nodes stable + IP addresses. You need to set the range in each interface to turn on this + feature. See below for details. + +Major new features +------------------ + +IP addresses overhaul. + This release contains a total reworking of IP address allocation. You can + now define a separate "static" range in each cluster interface configuration + that is separate from the DHCP server's dynamic range. Any node in use by + a user will receive an IP address from the static range that is guaranteed + not to change during its allocated lifetime. Previously, this was at the + whim of the DHCP server despite MAAS placing host maps in its configuration. + + Currently, dynamic IP addresses will continue to receive DNS entries so as + to maintain backward compatibility with installations being upgraded from + 1.5. However, this will be changed in a future release to only give + DNS entries to static IPs. + + You can also use the API to `reserve IP addresses`_ on a per-user basis. + +.. _reserve IP addresses: http://maas.ubuntu.com/docs1.6/api.html#ip-addresses + +Support for additional OSes. + MAAS can now install operating systems other than Ubuntu on nodes. + Preliminary beta support exists for CentOS and SuSE via the `Curtin`_ "fast" + installer. This has not been thoroughly tested yet and has been provided + in case anyone finds this useful and is willing to help find and report bugs. + + +Minor notable changes +--------------------- + +DNS entries + In 1.5 DNS entries for nodes were a CNAME record. As of 1.6, they are now + all "A" records, which allows for reliable reverse look-ups. + + Only nodes that are allocated to a user and started will receive "A" record + entries. Unallocated nodes no longer have DNS entries. 
+ +Removal of bootresources.yaml + The bootresources.yaml file, which had to be configured separately on each + cluster controller, is no longer in use. Instead, the configuration for + which images to download is now held by the region controller, and defaults + to downloading all images for LTS releases. A `rudimentary API`_ is + available to manipulate this configuration. + +.. _rudimentary API: http://maas.ubuntu.com/docs1.6/api.html#boot-source + +Fast installer is now the default + Prevously, the slower Debian installer was used by default. Any newly- + enlisted nodes will now use the newer `fast installer`_. Existing nodes + will keep the installer setting that they already have. + +.. _fast installer: https://launchpad.net/curtin + + +Bugs fixed in this release +-------------------------- +#1307779 fallback from specific to generic subarch broken +#1310082 d-i with precise+hwe-s stops at "Architecture not supported" +#1314174 Autodetection of the IPMI IP address fails when the 'power_address' +of the power parameters is empty. +#1314267 MAAS dhcpd will re-issue leases for nodes +#1317675 Exception powering down a virsh machine +#1322256 Import boot resources failing to verify keyring +#1322336 import_boot_images crashes with KeyError on 'keyring' +#1322606 maas-import-pxe-files fails when run from the command line +#1324237 call_and_check does not report error output +#1328659 import_boot_images task fails on utopic +#1332596 AddrFormatError: failed to detect a valid IP address from None executing upload_dhcp_leases task +#1250370 "sudo maas-import-ephemerals" steps on ~/.gnupg/pubring.gpg +#1250435 CNAME record leaks into juju's private-address, breaks host based access control +#1305758 Import fails while writing maas.meta: No such file or directory +#1308292 Unhelpful error when re-enlisting a previously enlisted node +#1309601 maas-enlist prints "successfully enlisted" even when enlistment fail +s. 
+#1309729 Fast path installer is not the default +#1310844 find_ip_via_arp() results in unpredictable, and in some cases, incorrect IP addresses +#1310846 amt template gives up way too easily +#1312863 MAAS fails to detect SuperMicro-based server's power type +#1314536 Copyright date in web UI is 2012 +#1315160 no support for different operating systems +#1316627 API needed to allocate and return an extra IP for a container +#1323291 Can't re-commission a commissioning node +#1324268 maas-cli 'nodes list' or 'node read ' doesn't display the osystem or distro_series node fields +#1325093 install centos using curtin +#1325927 YUI.Array.each not working as expected +#1328656 MAAS sends multiple stop_dhcp_server tasks even though there's no dhcp server running. +#1331139 IP is inconsistently capitalized on the 'edit a cluster interface' p +age +#1331148 When editing a cluster interface, last 3 fields are unintuitive +#1331165 Please do not hardcode the IP address of Canonical services into MAAS managed DHCP configs +#1338851 Add MAAS arm64/xgene support +#1307693 Enlisting a SeaMicro or Virsh chassis twice will not replace the missing entries +#1311726 No documentation about the supported power types and the related power parameters +#1331982 API documentation for nodegroup op=details missing parameter +#1274085 error when maas can't meet juju constraints is confusing and not helpful +#1330778 MAAS needs support for managing nodes via the Moonshot HP iLO Chassis Manager CLI +#1337683 The API client MAASClient doesn't encode list parameters when doing a GET +#1190986 ERROR Nonce already used +#1342135 Allow domains to be used for NTP server configuration, not just IPs +#1337437 Allow 14.10 Utopic Unicorn as a deployable series +#1350235 Package fails to install when the default route is through an aliased/tagged interface +#1353597 PowerNV: format_bootif should make sure mac address is all lowercase 1.5.3 ===== @@ -36,22 +784,17 @@ 1.5.2 ===== -Bug fix update 
--------------- +Minor feature changes +--------------------- -- Remove workaround for fixed Django bug 1311433 (LP: #1311433) -- Ensure that validation errors are returned when adding a node over - the API and its cluster controller is not contactable. (LP: #1305061) -- Hardware enablement support for PowerKVM -- Shorten the time taken for a cluster to initially connect to the region - via RPC to around 2 seconds (LP: #1317682) -- Faster DHCP leases parser (LP: #1305102) -- Documentation fixed explaining how to enable an ephemeral backdoor - (LP: #1321696) -- Use probe-and-enlist-hardware to enlist all virtual machine inside - a libvirt machine, allow password qemu+ssh connections. - (LP: #1315155, LP: #1315157) -- Rename ppc64el boot loader to PowerKVM (LP: #1315154) +Boot resource download changes. + Further to the work done in the 1.5 (Ubuntu 14.04) release, MAAS no + longer stores the configuration for downloading boot resources in + ``/etc/maas/bootresources.yaml``; this file is now obsolete. The + sources list is now stored on the region controller and passed to the + cluster controller when the job to download boot resources is started. + It is still possible to pass a list of sources to + ``maas-import-pxe-files`` when running the script manually. 1.5.1 @@ -175,7 +918,7 @@ #1255479 MaaS Internal Server Error 500 while parsing tags with namespaces in definition upon commissioning -#1269648 OAuth unauthorized errors mask the actual error text +#1269648 OAuth unauthorised errors mask the actual error text #1270052 Adding an SSH key fails due to a UnicodeDecodeError @@ -241,7 +984,7 @@ #1237197 No scheduled job for images download -#1238284 mutiple ip address displayed for a node +#1238284 multiple ip address displayed for a node #1243917 'maas createsuperuser' errors out if no email address is entered. 
@@ -269,7 +1012,7 @@ #1274465 Network identity shows broadcast address instead of the network's address -#1274499 dhcp lease rollover causes loss of access to managment IP +#1274499 dhcp lease rollover causes loss of access to management IP #1275643 When both IPMI 1.5 and 2.0 are available, MAAS should use 2.0 diff -Nru maas-1.5.4+bzr2294/contrib/maas-http.conf maas-1.7.6+bzr3376/contrib/maas-http.conf --- maas-1.5.4+bzr2294/contrib/maas-http.conf 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/maas-http.conf 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -WSGIDaemonProcess maas user=maas group=maas processes=2 threads=1 display-name=%{GROUP} +WSGIDaemonProcess maas user=maas group=maas home=/var/lib/maas processes=2 threads=1 display-name=%{GROUP} # Without this, defining a tag as a malformed xpath expression will hang # the region controller. @@ -37,12 +37,6 @@ -# Proxy to txlongpoll server. - - ProxyPreserveHost on - ProxyPass /MAAS/longpoll/ http://localhost:5242/ retry=1 - - # This can be safely removed once Django 1.4 is used: admin media # will be served using staticfiles. Alias /MAAS/static/admin/ /usr/share/pyshared/django/contrib/admin/media/ diff -Nru maas-1.5.4+bzr2294/contrib/maas_local_celeryconfig_cluster.py maas-1.7.6+bzr3376/contrib/maas_local_celeryconfig_cluster.py --- maas-1.5.4+bzr2294/contrib/maas_local_celeryconfig_cluster.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/maas_local_celeryconfig_cluster.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -# UUID identifying the running cluster controller. -CLUSTER_UUID = None diff -Nru maas-1.5.4+bzr2294/contrib/maas_local_celeryconfig.py maas-1.7.6+bzr3376/contrib/maas_local_celeryconfig.py --- maas-1.5.4+bzr2294/contrib/maas_local_celeryconfig.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/maas_local_celeryconfig.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -# Broker connection information. 
-# Format: transport://userid:password@hostname:port/virtual_host -BROKER_URL = '' diff -Nru maas-1.5.4+bzr2294/contrib/maas_local_settings.py maas-1.7.6+bzr3376/contrib/maas_local_settings.py --- maas-1.5.4+bzr2294/contrib/maas_local_settings.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/maas_local_settings.py 2015-07-10 01:27:14.000000000 +0000 @@ -23,12 +23,6 @@ # Use the package's files to serve RaphaelJS. RAPHAELJS_LOCATION = '/usr/share/javascript/raphael/' -# RabbitMQ settings. -RABBITMQ_HOST = 'localhost' -RABBITMQ_USERID = 'maas_longpoll' -RABBITMQ_PASSWORD = '' -RABBITMQ_VIRTUAL_HOST = '/maas_longpoll' - # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize the logging configuration. LOGGING_LEVEL = 'INFO' @@ -43,7 +37,10 @@ 'handlers': { 'log': { 'class': 'logging.handlers.RotatingFileHandler', - 'filename': '/var/log/maas/maas.log', + # DO NOT point this file at /var/log/maas/maas.log; MAAS now + # uses syslog to log to that file, and pointing the Django + # log output to it will clobber the syslog output. + 'filename': '/var/log/maas/maas-django.log', 'formatter': 'simple', }, }, @@ -77,6 +74,9 @@ } # Database access configuration. +from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED + + DATABASES = { 'default': { # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' etc. @@ -85,5 +85,8 @@ 'USER': '', 'PASSWORD': '', 'HOST': 'localhost', + 'OPTIONS': { + 'isolation_level': ISOLATION_LEVEL_READ_COMMITTED, + }, } } diff -Nru maas-1.5.4+bzr2294/contrib/maas-rsyslog.conf maas-1.7.6+bzr3376/contrib/maas-rsyslog.conf --- maas-1.5.4+bzr2294/contrib/maas-rsyslog.conf 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/maas-rsyslog.conf 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,2 @@ +# Log MAAS messages to their own file. 
+:syslogtag,contains,"maas" /var/log/maas/maas.log diff -Nru maas-1.5.4+bzr2294/contrib/preseeds_v2/curtin_userdata_centos maas-1.7.6+bzr3376/contrib/preseeds_v2/curtin_userdata_centos --- maas-1.5.4+bzr2294/contrib/preseeds_v2/curtin_userdata_centos 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/preseeds_v2/curtin_userdata_centos 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,12 @@ +#cloud-config +debconf_selections: + maas: | + {{for line in str(curtin_preseed).splitlines()}} + {{line}} + {{endfor}} + +late_commands: + maas: [wget, '--no-proxy', '{{node_disable_pxe_url}}', '--post-data', '{{node_disable_pxe_data}}', '-O', '/dev/null'] + +power_state: + mode: reboot diff -Nru maas-1.5.4+bzr2294/contrib/preseeds_v2/curtin_userdata_custom maas-1.7.6+bzr3376/contrib/preseeds_v2/curtin_userdata_custom --- maas-1.5.4+bzr2294/contrib/preseeds_v2/curtin_userdata_custom 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/preseeds_v2/curtin_userdata_custom 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,12 @@ +#cloud-config +debconf_selections: + maas: | + {{for line in str(curtin_preseed).splitlines()}} + {{line}} + {{endfor}} + +late_commands: + maas: [wget, '--no-proxy', '{{node_disable_pxe_url}}', '--post-data', '{{node_disable_pxe_data}}', '-O', '/dev/null'] + +power_state: + mode: reboot diff -Nru maas-1.5.4+bzr2294/contrib/preseeds_v2/curtin_userdata_suse maas-1.7.6+bzr3376/contrib/preseeds_v2/curtin_userdata_suse --- maas-1.5.4+bzr2294/contrib/preseeds_v2/curtin_userdata_suse 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/preseeds_v2/curtin_userdata_suse 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,12 @@ +#cloud-config +debconf_selections: + maas: | + {{for line in str(curtin_preseed).splitlines()}} + {{line}} + {{endfor}} + +late_commands: + maas: [wget, '--no-proxy', '{{node_disable_pxe_url}}', '--post-data', '{{node_disable_pxe_data}}', '-O', '/dev/null'] + +power_state: + mode: reboot diff -Nru 
maas-1.5.4+bzr2294/contrib/preseeds_v2/curtin_userdata_windows maas-1.7.6+bzr3376/contrib/preseeds_v2/curtin_userdata_windows --- maas-1.5.4+bzr2294/contrib/preseeds_v2/curtin_userdata_windows 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/preseeds_v2/curtin_userdata_windows 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,14 @@ +#cloud-config +debconf_selections: + maas: | + {{for line in str(curtin_preseed).splitlines()}} + {{line}} + {{endfor}} + +late_commands: + maas: [wget, '--no-proxy', '{{node_disable_pxe_url}}', '--post-data', '{{node_disable_pxe_data}}', '-O', '/dev/null'] + +license_key: {{node.get_effective_license_key()}} + +power_state: + mode: reboot diff -Nru maas-1.5.4+bzr2294/contrib/preseeds_v2/enlist_userdata maas-1.7.6+bzr3376/contrib/preseeds_v2/enlist_userdata --- maas-1.5.4+bzr2294/contrib/preseeds_v2/enlist_userdata 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/preseeds_v2/enlist_userdata 2015-07-10 01:27:14.000000000 +0000 @@ -6,6 +6,23 @@ apt_proxy: http://{{server_host}}:8000/ {{endif}} +system_info: + package_mirrors: + - arches: [i386, amd64] + search: + primary: ["http://{{main_archive_hostname}}/{{main_archive_directory}}"] + security: ["http://{{main_archive_hostname}}/{{main_archive_directory}}"] + failsafe: + primary: "http://archive.ubuntu.com/ubuntu" + security: "http://security.ubuntu.com/ubuntu" + - arches: [default] + search: + primary: ["http://{{ports_archive_hostname}}/{{ports_archive_directory}}"] + security: ["http://{{ports_archive_hostname}}/{{ports_archive_directory}}"] + failsafe: + primary: "http://ports.ubuntu.com/ubuntu-ports" + security: "http://ports.ubuntu.com/ubuntu-ports" + misc_bucket: - &maas_enlist | #### IPMI setup ###### @@ -83,8 +100,9 @@ # thanks to 'IPAPPEND' (http://www.syslinux.org/wiki/index.php/SYSLINUX) url="{{server_url}}" host="" + dig_output="" ip=$(ifconfig eth0 | awk '$1 == "inet" { sub("addr:","",$2); print $2; }') && - [ -n "${ip}" ] && host=$(dig 
+short -x $ip) && host=${host%.} + [ -n "${ip}" ] && dig_output=$(dig +short -x $ip) && host=${dig_output%.} # load ipmi modules load_modules pargs="" diff -Nru maas-1.5.4+bzr2294/contrib/preseeds_v2/generic maas-1.7.6+bzr3376/contrib/preseeds_v2/generic --- maas-1.5.4+bzr2294/contrib/preseeds_v2/generic 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/preseeds_v2/generic 2015-07-10 01:27:14.000000000 +0000 @@ -2,7 +2,7 @@ {{def proxy}} d-i mirror/country string manual -{{if node.architecture in {'i386/generic', 'amd64/generic'} }} +{{if node.split_arch()[0] in {'i386', 'amd64'} }} d-i mirror/http/hostname string {{main_archive_hostname}} d-i mirror/http/directory string {{main_archive_directory}} {{else}} diff -Nru maas-1.5.4+bzr2294/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012 maas-1.7.6+bzr3376/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012 --- maas-1.5.4+bzr2294/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,166 @@ + + + + + + OnError + + + + 1 + 100 + Primary + + + 2 + true + Primary + + + + + true + + NTFS + 1 + 1 + + + NTFS + 2 + 2 + + + + 0 + true + + + + + + 2 + 0 + + false + OnError + + + /IMAGE/NAME + + Windows Server 2012 SERVERSTANDARD + + + + + + true + + {{node.license_key}} + OnError + + + + + + en-US + + en-US + en-US + en-US + en-US + + + + + + ClearType + + + true + + 3 + Work + true + true + true + + + + + + + + + {{preseed_data['hostname']}} + + + + + false + + + 0 + + + + + true + all + @FirewallAPI.dll,-28752 + + + + + UTC + {{preseed_data['hostname']}} + + + + + 1 + powershell -NoLogo -Command "do{Start-Sleep 2; (new-object System.Net.WebClient).DownloadFile('http://www.cloudbase.it/downloads/CloudbaseInitSetup_Beta.msi','C:\Windows\Temp\CB.msi')} while ($? 
-eq $false)" + + + 2 + msiexec /i C:\Windows\Temp\CB.msi /qb MAAS_METADATA_URL={{preseed_data['maas_metadata_url']}} MAAS_OAUTH_CONSUMER_KEY={{preseed_data['maas_oauth_consumer_key']}} MAAS_OAUTH_TOKEN_KEY={{preseed_data['maas_oauth_token_key']}} MAAS_OAUTH_TOKEN_SECRET={{preseed_data['maas_oauth_token_secret']}} + + + + + 0 + + + diff -Nru maas-1.5.4+bzr2294/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012hv maas-1.7.6+bzr3376/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012hv --- maas-1.5.4+bzr2294/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012hv 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012hv 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,166 @@ + + + + + + OnError + + + + 1 + 100 + Primary + + + 2 + true + Primary + + + + + true + + NTFS + 1 + 1 + + + NTFS + 2 + 2 + + + + 0 + true + + + + + + 2 + 0 + + false + OnError + + + /IMAGE/NAME + + Hyper-V Server 2012 SERVERHYPERCORE + + + + + + true + + + + + + en-US + + en-US + en-US + en-US + en-US + + + + + + ClearType + + + true + + 3 + Work + true + true + true + + + + + + + + + {{preseed_data['hostname']}} + + + + + false + + + 0 + + + + + true + all + @FirewallAPI.dll,-28752 + + + + + UTC + {{preseed_data['hostname']}} + + + + + 1 + powershell -NoLogo -Command "do{Start-Sleep 2; (new-object System.Net.WebClient).DownloadFile('http://www.cloudbase.it/downloads/CloudbaseInitSetup_Beta.msi','C:\Windows\Temp\CB.msi')} while ($? 
-eq $false)" + + + 2 + msiexec /i C:\Windows\Temp\CB.msi /qb MAAS_METADATA_URL={{preseed_data['maas_metadata_url']}} MAAS_OAUTH_CONSUMER_KEY={{preseed_data['maas_oauth_consumer_key']}} MAAS_OAUTH_TOKEN_KEY={{preseed_data['maas_oauth_token_key']}} MAAS_OAUTH_TOKEN_SECRET={{preseed_data['maas_oauth_token_secret']}} + + + + + 0 + + + diff -Nru maas-1.5.4+bzr2294/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012hvr2 maas-1.7.6+bzr3376/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012hvr2 --- maas-1.5.4+bzr2294/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012hvr2 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012hvr2 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,166 @@ + + + + + + OnError + + + + 1 + 100 + Primary + + + 2 + true + Primary + + + + + true + + NTFS + 1 + 1 + + + NTFS + 2 + 2 + + + + 0 + true + + + + + + 2 + 0 + + false + OnError + + + /IMAGE/NAME + + Hyper-V Server 2012 R2 SERVERHYPERCORE + + + + + + true + + + + + + en-US + + en-US + en-US + en-US + en-US + + + + + + ClearType + + + true + + 3 + Work + true + true + true + + + + + + + + + {{preseed_data['hostname']}} + + + + + false + + + 0 + + + + + true + all + @FirewallAPI.dll,-28752 + + + + + UTC + {{preseed_data['hostname']}} + + + + + 1 + powershell -NoLogo -Command "do{Start-Sleep 2; (new-object System.Net.WebClient).DownloadFile('http://www.cloudbase.it/downloads/CloudbaseInitSetup_Beta.msi','C:\Windows\Temp\CB.msi')} while ($? 
-eq $false)" + + + 2 + msiexec /i C:\Windows\Temp\CB.msi /qb MAAS_METADATA_URL={{preseed_data['maas_metadata_url']}} MAAS_OAUTH_CONSUMER_KEY={{preseed_data['maas_oauth_consumer_key']}} MAAS_OAUTH_TOKEN_KEY={{preseed_data['maas_oauth_token_key']}} MAAS_OAUTH_TOKEN_SECRET={{preseed_data['maas_oauth_token_secret']}} + + + + + 0 + + + diff -Nru maas-1.5.4+bzr2294/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012r2 maas-1.7.6+bzr3376/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012r2 --- maas-1.5.4+bzr2294/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012r2 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012r2 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,166 @@ + + + + + + OnError + + + + 1 + 100 + Primary + + + 2 + true + Primary + + + + + true + + NTFS + 1 + 1 + + + NTFS + 2 + 2 + + + + 0 + true + + + + + + 2 + 0 + + false + OnError + + + /IMAGE/NAME + + Windows Server 2012 R2 SERVERSTANDARD + + + + + + true + + {{node.license_key}} + OnError + + + + + + en-US + + en-US + en-US + en-US + en-US + + + + + + ClearType + + + true + + 3 + Work + true + true + true + + + + + + + + + {{preseed_data['hostname']}} + + + + + false + + + 0 + + + + + true + all + @FirewallAPI.dll,-28752 + + + + + UTC + {{preseed_data['hostname']}} + + + + + 1 + powershell -NoLogo -Command "do{Start-Sleep 2; (new-object System.Net.WebClient).DownloadFile('http://www.cloudbase.it/downloads/CloudbaseInitSetup_Beta.msi','C:\Windows\Temp\CB.msi')} while ($? 
-eq $false)" + + + 2 + msiexec /i C:\Windows\Temp\CB.msi /qb MAAS_METADATA_URL={{preseed_data['maas_metadata_url']}} MAAS_OAUTH_CONSUMER_KEY={{preseed_data['maas_oauth_consumer_key']}} MAAS_OAUTH_TOKEN_KEY={{preseed_data['maas_oauth_token_key']}} MAAS_OAUTH_TOKEN_SECRET={{preseed_data['maas_oauth_token_secret']}} + + + + + 0 + + + diff -Nru maas-1.5.4+bzr2294/debian/changelog maas-1.7.6+bzr3376/debian/changelog --- maas-1.5.4+bzr2294/debian/changelog 2015-01-09 11:49:25.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/changelog 2015-07-10 17:57:54.000000000 +0000 @@ -1,125 +1,361 @@ -maas (1.5.4+bzr2294-0ubuntu1.3) trusty-proposed; urgency=medium +maas (1.7.6+bzr3376-0ubuntu2~14.04.1) trusty; urgency=medium - [Louis Bouchard] - * debian/maas-cluster-controller.postinst: - - Exclude /var/log/maas/rsyslog when changing ownership - (LP: #1346703) - - -- Louis Bouchard Tue, 09 Dec 2014 14:01:27 +0100 - -maas (1.5.4+bzr2294-0ubuntu1.2) trusty-security; urgency=medium - - * Fix compatibility with mod-wsgi security update (LP: #1399016) - - debian/patches/home-directory.patch: specify a valid home directory - for the maas user, since mod-wsgi no longer works without one. 
- - -- Marc Deslauriers Thu, 04 Dec 2014 13:59:56 -0500 - -maas (1.5.4+bzr2294-0ubuntu1.1) trusty-proposed; urgency=medium - - * Add hardware enablement for armhf/keystone (LP: #1350103) - - -- Greg Lutostanski Thu, 18 Sep 2014 16:43:56 -0500 - -maas (1.5.4+bzr2294-0ubuntu1) trusty-proposed; urgency=medium - - * New upstream bug fix release: - - Change supported releases for install to Precise, Saucy, Trusty, Utopic - (Add Utopic; Remove Quantal/Raring) -- will still only be able to install - releases with streams available to maas (LP: #1337437) - - Package fails to install when the default route is through an - aliased/tagged interface (LP: #1350235) - - ERROR Nonce already used (LP: #1190986) - - Add MAAS arm64/xgene support (LP: #1338851) - - API documentation for nodegroup op=details missing parameter - (LP: #1331982) - - Reduce number of celery tasks emitted when updating a cluster controller - (LP: #1324944) - - Fix VirshSSH template which was referencing invalid attributes - (LP: #1324966) - - Fix a start up problems where a database lock was being taken outside of - a transaction (LP: #1325640, LP: #1325759) - - Reformat badly formatted Architecture error message (LP: #1301465) - - Final changes to support ppc64el (now known as PowerNV) (LP: #1315154) - - UI tweak to make navigation elements visible for documentation + * debian/control: Make maas-dns a Dependy of maas-region-controller. + * debian/maas-region-controller.postinst: Ensure DNS config migration is + always run. (LP: #1413388) - [ Greg Lutostanski ] - * debian/control: - - maas-provisioningserver not maas-cluster-controller depends on - python-pexpect (LP: #1352273) + -- Andres Rodriguez Fri, 10 Jul 2015 13:47:40 -0400 - [ Gavin Panella ] - * debian/maas-cluster-controller.postinst - - Allow maas-pserv to bind to all IPv6 addresses too. 
(LP: #1342302) +maas (1.7.6+bzr3376-0ubuntu1) wily; urgency=medium - [ Diogo Matsubara ] - * debian/control: - - python-maas-provisioningserver depends on python-paramiko (LP: #1334401) + * New upstream release 1.7.6 bzr3376: + - Accept list of forwarders for upstream_dns rather than just + one. (LP: #1470585) + - Fix upgrade issue where it would remove custom DNS config, + potentially breaking DNS. (LP: #1413388) [ Raphaël Badin ] - * debian/extras/99-maas-sudoers: - - Add rule 'maas-dhcp-server stop' job. + * Drop dependency on python-iscpy: the code has been integrated into + MAAS. (LP: #1413388). + + [ Andres Rodriguez ] + * Refactor maas-dns upgrade code so it doesn't break local DNS config + and it gets migrated (LP: #1413388) + + -- Andres Rodriguez Fri, 03 Jul 2015 00:11:50 -0400 + +maas (1.7.5+bzr3369-0ubuntu1) vivid; urgency=medium + + * New upstream release, MAAS 1.7.5 bzr3369 (LP: #1460737): + - MAAS cli/API: missing option set use-fast-installer / + use-debian-installer (LP: #1456969) + + -- Andres Rodriguez Mon, 11 May 2015 12:43:31 +0100 + +maas (1.7.4+bzr3366-0ubuntu1) vivid; urgency=medium + + * New upstream release, MAAS 1.7.4 bzr3365: + - When MAAS has too many leases, and lease parsing fails, MAAS fails + to auto-map NIC with network. (LP: #1387859) + - Alert a command-line user of `maas` when their local API description + is out-of-date. (LP: #1329267) + + -- Andres Rodriguez Sat, 09 May 2015 12:07:15 +0100 + +maas (1.7.3+bzr3363-0ubuntu2) vivid; urgency=medium + + * Use python-django16 instead of python-django ( 1.7). MAAS does not + support django 1.7 due to having a new migration system. (LP: #1447573) + * Adding systemd units and packaging support. (LP: #1423613) + * debian/control: Add missing dependency on python-apt. 
(LP: #1446878) + + -- Andres Rodriguez Wed, 22 Apr 2015 12:10:50 -0400 + +maas (1.7.3+bzr3363-0ubuntu1) vivid; urgency=medium + + * New upstream bugfix release, 1.7.3 bzr3363: + - Fix sorting by MAC address on node listing page (LP: #1437094) + - Automatically set correct boot resources and start re-import + so MAAS is usable after upgrade from 1.5. (LP: #1439359) + - Maintain backwards compatibility with MAAS 1.5 (custom) preseeds names + so users don't have to manually change preseed names. (LP: #1439366) + - Expose the version and the subversion of the running MAAS + on the API. (LP: #1441133) + - Fix Internal Server Error when saving a cluster without specifying + Router IP. (LP: #1441933) + - debian/patches/02-pserv-config.patch: Ensure pserv config file has a + default. This will also fix (LP: #1273197) + + -- Andres Rodriguez Tue, 24 Mar 2015 15:56:10 -0400 + +maas (1.7.2+bzr3355-0ubuntu1) trusty; urgency=medium + + * New upstream release, 1.7.2 bzr3355: + - Support AMT Version > 8 (LP: #1331214) + - Fix call to amttool when restarting a node to not fail + disk erasing. (LP: #1397567) + - Do not generate the 'option routers' stanza if router IP + is None. (LP: #1415538) + - Do not deallocate StaticIPAddress before node has powered + off. (LP: #1403909) + - Remove all OOPS reporting. (LP: #1405998) + - Update node host maps when a sticky ip address is claimed + over the API. (LP: #1423931) + * debian/control: + - Depends on ubuntu-cloudimage-keyring for region (LP: #1424287) + - Depends on pxelinux instead of syslinux-dev (LP: #1433697) + * Drop dependencies on python-oops* and add dependency to python-bson. + + -- Andres Rodriguez Fri, 30 Jan 2015 11:58:07 +0000 + +maas (1.7.1+bzr3341-0ubuntu1) vivid; urgency=medium + + * New upstream release, 1.7.1 bzr3341: + - Fix crash when trying to add an empty network. 
(LP: #1414036) + - Fix Add Node WebUI Page to not display OS/Release if + not populated (LP: #1413030) + - Fix Power8 VM enlistment to correctly by correctly parsing the + MAC address. (LP: #1409952) + - Fix IP Address reservation to not fail when trying to reserve + an IP address. (LP: #1409852) + - Fix probe-and-enlist locking up cluster (LP: #1403609) + - Fix slow node event log queries (LP: #1402237) + - Fix marking settings page as active when viewing node page + (LP: #1403043) + - Fix broken zone link on node page (LP: #1402736) + - Fix cluster process from uses high levels of CPU on web + requests (LP: #1401707) + - Update API documentation for error codes (LP: #1391193) + - Do not query broken nodes power state (LP: #1402243) + - Display MAC address node booted from on node listing + (LP: #1399676) + - Show node memory size in MiB or GiB (LP: #1399736) + - Fix deadlock caused by updating nodes PXE MAC address + on every PXE request (LP: #1401983) + - Fix auto creation of networks using the interface name + instead of the cluster name (LP: #1400909) + - Fix node page to display memory in GiB (LP: #1401349) + - Fix to maas not knowing about VM paused state (LP: #1394382) + - Fix CustomOS image by returning the correct filename and path + (LP: #1401241, LP: #1393953) + - Don't query for node info when we don't have a MAC address when + accessing the TFTP server from local machine. (LP: #1398159) + - Ensure PXE MAC address is the first reported to MAAS on + enlistment (LP: #1400849) + - Do not use poweroff.c32/poweroff.com as some systems don't + support it. Instead use the ephemeral environment to issue a + poweroff command. (LP: #1376716) + - Pipe script commands to /bind/sh instead of using shell=True + to prevent showing error output of px ax (LP: #1399331) + - Do not delete node's connected to a cluster interface, if + the latter gets removed. 
(LP: #1396308) + - Ensure disk erasing always runs the commissioning + ephemeral image (LP: #1397193) + - Add capability to use an option prefix_filer parameter to + probe_and_enlist_hardware when the model is virsh (LP: #1393423) + - Add capability to query power status for sm15k (LP: #1384424) + - Add capability to query power status for UCSM (LP: #1384426) + - Ensure WSGI has a home defined for the user. (LP: #1399016) + + [ Jeroen Vermeulen ] + * debian/maas-cluster-controller.postinst: configure MAAS_URL through new + "maas-provision configure-maas-url" sub-command instead of with "sed". + + [ Andres Rodriguez ] + * Set 'maas' homedir to '/var/lib/maas' (LP: #1399016): + - debian/maas-common.postinst: Do not try to update 'maas' + home user dir on upgrade. + * debian/control: Tighten Dependency Versioning of some packages. + + -- Andres Rodriguez Fri, 30 Jan 2015 11:47:16 +0000 + +maas (1.7.0+bzr3299-0ubuntu1) utopic; urgency=medium + + * New upstream release, 1.7.0. + + -- Andres Rodriguez Fri, 14 Nov 2014 08:38:39 -0500 + +maas (1.7.0~rc3+bzr3299-0ubuntu1) utopic; urgency=medium + + * New upstream release, RC3 bzr 3299 + - Fix dashboard tooltip messages which had stopped being displayed + (LP: #1390434) + + -- Julian Edwards Tue, 11 Nov 2014 12:23:07 +1000 + +maas (1.7.0~rc2+bzr3297-0ubuntu1) utopic; urgency=medium + + * New upstream release, RC2 bzr 3297 + - Allow marking a node in the DISK_ERASING state as broken (LP: #1388919) + - Prevent concurrent allocations of static IP addresses from returning + the same clashing address. (LP: #1387262) + + [ Andres Rodriguez ] + * debian/maas-cluster-controller.postinst: Make sure that cluster sets + the correct permissions for proxy log dir if running on the same + system. 
(LP: #1377964) + + -- Julian Edwards Fri, 07 Nov 2014 09:30:52 +1000 + +maas (1.7.0~rc1+bzr3295-0ubuntu1) trusty; urgency=medium + + * New Upstream Release, RC1 bzr 3295 + - If using MSCM (Moonshot Chassis Manager) on a M300, do not overwrite + power parameters (LP: #1382075) + - Do not write DNS hostmaps if its PXE Mac is not on a managed cluster + interface. (LP: #1382108) + - Show os field in boot source selections API. (LP: #1384383) + - Add documentation for new Import Images features (LP: #1384010) + - Allow releasing if Failed to Disk Wipe or Release (LP: #1384821) + - Handle missing subarch when creating a node via RPC for + probe-and-enlist methods (LP: #1384778) + - Update default waiting policy for power actions to increase time of + wait to handle different type of BMC's (LP: #1384758) + - Use HTTP_PROXY to download images if configured (LP: #1384464) + - Return 503 response for PowerActionAlreadyInProgress and add a + 'Retry-after' Header. (LP: #1384001) + - Stop ImportResourcesService from crashing when an import fails + (LP: #1386722) + - Convert TFTP request paths that contain backslashes to forward + slashes. (LP: #1387191) + - Do not try to delete synced boot images if not new ones are present + to handle a weird race. (LP: #1387133) + - Fix storage unit shown on the node edit page (LP: #1387431) + - Use CompressedAmpList in UpdateLeases to now parse + ~3500 leases (LP: #1387515, LP: #1387515) + - Updated XPath expression for discovering storage capacity from new + lshw changes. (LP: #1387380) + - Add sub status field on API. + - Generate DNS mappings for the Dynamic IP range. Hostname generation + is limited to /16. (LP: #1382190) + - Use configured HTTP Proxy to download boot images (LP: #1384464) + * debian/extras/maas-proxy-common.sh: Ensure that permissions are correct + before starting maas-proxy. 
(LP: #1382266) + + -- Andres Rodriguez Mon, 27 Oct 2014 21:53:43 -0400 + +maas (1.7.0~beta8+bzr3272-0ubuntu1) utopic; urgency=medium + + * New Upstream Release, Beta 8 bzr 3272. + - Maintain backward compatibility with status codes to not break + client MAAS API users. (LP: #1383609) + - Allow users to disable DHCP NIC scanning during commissioning otherwise + this can lead machines not being able to commission (LP: #1383384) + - Ensure that MAAS can correctly download, and install HWE Kernels, and + not crash in the process. (LP: #1357532, LP: #1382281) + - Add a lock to ensure we don't give the same 2 systems to two different + users when acquiring a machine. (LP: #1382575) + + [ Michael McCracken ] + * debian/maas-dns.postrm: ensure named.conf is cleaned of maas + includes (LP: #1346538) + + -- Andres Rodriguez Wed, 22 Oct 2014 12:55:08 -0400 + +maas (1.7.0~beta7+bzr3266-0ubuntu1) utopic; urgency=medium + + * New Upstream Snapshot, Beta 7 bzr3266 + + [ Jeroen Vermeulen ] + * debian/extras/99-maas-sudoers + debian/maas-dhcp.postinst + debian/rules + - Add second DHCP server instance for IPv6. + * debian/maas-region-controller-min.install + debian/maas-region-controller-min.lintian-overrides + - Install deployment user-data: maas_configure_interfaces.py script. + * debian/maas-cluster-controller.links + debian/maas-cluster-controller.install + debian/maas-cluster-controller.postinst + - Reflect Celery removal changes made in trunk r3067. + - Don't install celeryconfig_cluster.py any longer. + - Don't install maas_local_celeryconfig_cluster.py any longer. + - Don't symlink maas_local_celeryconfig_cluster.py from /etc to /usr. + - Don't insert UUID into maas_local_celeryconfig_cluster.py. + + [ Andres Rodriguez ] + * debian/maas-region-controller-min.postrm: Cleanup lefover files. + * debian/maas-dhcp.postrm: Clean leftover configs. 
+ * Provide new maas-proxy package that replaces the usage of + squid-deb-proxy: + - debian/control: New maas-proxy package that replaces the usage + of squid-deb-proxy; Drop depends on squid-deb-proxy. + - Add upstart job. + - Ensure squid3 is stopped as maas-proxy uses a caching proxy. + * Remove Celery references to cluster controller: + - Rename upstart job from maas-pserv to maas-cluster; rename + maas-cluster-celery to maas-cluster-register. Ensure services + are stopped on upgrade. + - debian/maintscript: Cleanup config files. + - Remove all references to the MAAS celery daemon and config + files as we don't use it like that anymore + * Move some entries in debian/maintscript to + debian/maas-cluster-controller.maintscript + * Remove usage of txlongpoll and rabbitmq-server. Handle upgrades + to ensure these are removed correctly. + + [ Jason Hobbs ] + * debian/maas-region-controller-min.install: Install + maas-generate-winrm-cert script. + + [ Raphaël Badin ] + * debian/extras/maas-region-admin: Bypass django-admin as it prints + spurious messages to stdout (LP: #1365130). + + [Louis Bouchard] + * debian/maas-cluster-controller.postinst: + - Exclude /var/log/maas/rsyslog when changing ownership + (LP: #1346703) + + [Gavin Panella] + * debian/maas-cluster-controller.maas-clusterd.upstart: + - Don't start-up the cluster controller unless a shared-secret has + been installed. + * debian/maas-cluster-controller.maas-cluster-register.upstart: Drop. 
- -- Greg Lutostanski Fri, 29 Aug 2014 13:27:34 -0500 + -- Andres Rodriguez Thu, 21 Aug 2014 19:36:30 -0400 -maas (1.5.2+bzr2282-0ubuntu0.2) trusty-proposed; urgency=medium +maas (1.7.0~beta1+bzr2781-0ubuntu1) utopic; urgency=medium + * New upstream release, 1.7.0 Beta 1 + + [Diogo Matsubara] * debian/control: - - Add missing dependency in maas-cluster-controller for grub-common - (LP: #1328231) - - Move dependency from maas-cluster-controller to maas-provisioningserver - for python-seamicroclient (LP: #1332532) - - -- Greg Lutostanski Fri, 20 Jun 2014 10:10:47 -0500 - -maas (1.5.2+bzr2282-0ubuntu0.1) trusty-proposed; urgency=medium - - * New upstream release: - - Remove workaround for fixed Django bug 1311433 (LP: #1311433) - - Ensure that validation errors are returned when adding a node over - the API and its cluster controller is not contactable. (LP: #1305061) - - Hardware enablement support for PowerKVM (LP: #1325038) - - Shorten the time taken for a cluster to initially connect to the region - via RPC to around 2 seconds (LP: #1317682) - - Faster DHCP leases parser (LP: #1305102) - - Documentation fixed explaining how to enable an ephemeral backdoor - (LP: #1321696) - - Use probe-and-enlist-hardware to enlist all virtual machine inside - a libvirt machine, allow password qemu+ssh connections. - (LP: #1315155, LP: #1315157) - - Rename ppc64el boot loader to PowerKVM (LP: #1315154) - - Fix NodeForm's is_valid() method so that it uses Django's way of setting - errors on forms instead of putting text in self.errors['architecture'] - (LP: #1301465) - - Change BootMethods to return their own IReader per-request, update method - names to reflect new usage. (LP: #1315154) - - Return early and stop the DHCP server when the list of managed interfaces - of the nodegroup is empty. (LP: #1324944) - - Fix invalid attribute references in the VirshSSH class. Added more test - for the VirshSSH class. 
(LP: #1324966) + - maas-cluster-controller depends on syslinux-dev | + syslinux-common (LP: #1328659) + - python-maas-provisioningserver depends on + python-paramiko (LP: #1334401) + + [Jeroen Vermeulen] + * debian/extras/99-maas-sudoers: + - Let maas user import, including sudo tgt-admin and sudo uec2roottar. + * debian/maas-cluster-controller.install: + - Stop installing obsolete file bootresources.yaml. + + [ Raphaël Badin ] * debian/control: - - Add missing dependency in maas-cluster-controller for python-pexpect - (LP: #1322151) + - maas-cluster-controller depends on python-pexpect + * debian/extras/99-maas-sudoers: + - Add rule 'maas-dhcp-server stop' job. - -- Greg Lutostanski Wed, 04 Jun 2014 14:31:41 -0500 + [ Greg Lutostanski ] + * debian/control: + - maas-cluster-controller depends on grub-common + - maas-provisioningserver not maas-cluster-controller depends on + python-pexpect (LP: #1352273) + - maas-provisioningserver not maas-cluster-controller depends on + python-seamicroclient (LP: #1332532) -maas (1.5.1+bzr2269-0ubuntu0.1) trusty; urgency=medium + [ Gavin Panella ] + * debian/maas-cluster-controller.postinst + - Allow maas-pserv to bind to all IPv6 addresses too. - * Stable Release Update (LP: #1317601): - - Hardware Enablement for Cisco B-Series. (LP: #1300476) - - Allow AMT power type to specify IP Address. (LP: #1308772) - - Spurious failure when starting and creating lock files. (LP: 1308069) - - Fix usage of hardware enablement kernels by fixing the preseeds - (LP: #1310082, LP: #1310076, LP: #1310082) - - Fix parallel juju deployments. 
(LP: #1314409) - - Clear distro_series when stopping node from WebUI (LP: #1316396) - - Fix click hijacking (LP: #1298784) - - Fix blocking API client when deleting a resource (LP: #1313556) - - Do not import Trusty RC images by default (LP: #1311151) - - debian/control: Add missing dep on python-crochet for - python-maas-provisioningserver (LP: #1311765) + [ Julian Edwards ] + * debian/maas-region-controller-min.apport + debian/maas-region-controller-min.logrotate + debian/maas-region-controller-min.postinst + debian/maas-region-controller.postinst + - Change the log file name maas.log to maas-django.log + * debian/maas-cluster-controller.postinst + debian/maas-common.install + debian/maas-region-controller-min.postinst + debian/maas-region-controller.postinst + - Install /var/log/maas/maas.log as a syslog file. + - Ensure logging is set up for upgrades + + [ Graham Binns ] + * debian/maas-region-controller.postinst: + - Add symlinks for squid3, squid-deb-proxy and apache log directories to + /var/log/maas. + + [ Andres Rodriguez ] + * debian/maas-region-controller.postinst: Force symlink creation + for external logs. + * debian/maas-region-controller.postinst: Do not change celery's + rabbitmq password on upgrade that to not lock remote + Cluster Controllers if upgrading from 1.5+. 
(LP: #1300507) - -- Andres Rodriguez Fri, 09 May 2014 22:35:43 -0500 + -- Andres Rodriguez Thu, 21 Aug 2014 14:05:40 -0400 maas (1.5+bzr2252-0ubuntu1) trusty; urgency=medium diff -Nru maas-1.5.4+bzr2294/debian/control maas-1.7.6+bzr3376/debian/control --- maas-1.5.4+bzr2294/debian/control 2014-09-19 20:32:28.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/control 2015-07-10 17:47:33.000000000 +0000 @@ -4,6 +4,7 @@ Maintainer: Ubuntu Developers Build-Depends: debhelper (>= 8.1.0~), dh-apport, + dh-systemd, po-debconf, python (>= 2.7), python-distribute, @@ -14,7 +15,10 @@ Package: maas Architecture: all -Depends: ${misc:Depends}, maas-cluster-controller, maas-region-controller, python-django-maas +Depends: ${misc:Depends}, + maas-cluster-controller (= ${binary:Version}), + maas-region-controller (= ${binary:Version}), + python-django-maas (= ${binary:Version}) Description: MAAS server all-in-one metapackage MAAS is "Metal As A Service", a system for dynamic provisioning of physical servers that has many of the same characteristics as a cloud. 
You register @@ -52,15 +56,16 @@ libjs-yui3-full, libjs-yui3-min, maas-common (= ${binary:Version}), - pwgen, - python-django (>= 1.4), + python-django16 | python-django (<< 1.7), + python-django, python-django-maas (= ${binary:Version}), python-django-piston, python-django-south, python-maas-provisioningserver (= ${binary:Version}), python-djorm-ext-pgarray, rsyslog, - squid-deb-proxy, + ubuntu-cloudimage-keyring, + maas-proxy (= ${binary:Version}), ${misc:Depends}, ${python:Depends} Breaks: maas-region-controller ( <= 1.4+bzr1693+dfsg-0ubuntu3 ) @@ -72,12 +77,12 @@ Package: maas-region-controller Architecture: all -Depends: maas-region-controller-min (= ${binary:Version}), +Depends: maas-dns (= ${binary:Version}), + maas-region-controller-min (= ${binary:Version}), postgresql (>= 9.1), - rabbitmq-server, ${misc:Depends}, ${python:Depends} -Recommends: openssh-server, maas-dns (= ${binary:Version}) +Recommends: openssh-server Description: MAAS server complete region controller This package installs and configures the complete MAAS region controller, including the minimum setup, plus the database server and messaging queue. 
@@ -85,7 +90,7 @@ Package: python-maas-provisioningserver Section: python Architecture: all -Depends: python-amqp, +Depends: python-bson, python-celery, python-crochet, python-distro-info, @@ -93,12 +98,8 @@ python-jsonschema, python-lockfile, python-lxml, - python-maas-client, + python-maas-client (= ${binary:Version}), python-netifaces, - python-oops, - python-oops-amqp, - python-oops-datedir-repo, - python-oops-twisted, python-paramiko, python-pexpect, python-pyparsing, @@ -115,7 +116,6 @@ ${python:Depends} Breaks: python-django-maas ( <= 0.1+bzr1048+dfsg-0ubuntu1 ), maas-cluster-controller (<= 0.1+bzr1243+dfsg-0ubuntu3), maas-dhcp (<= 1.4+bzr1817+dfsg-0ubuntu1) Replaces: python-django-maas ( <= 0.1+bzr1048+dfsg-0ubuntu1 ), maas-cluster-controller (<= 0.1+bzr1243+dfsg-0ubuntu3), maas-dhcp (<= 1.4+bzr1817+dfsg-0ubuntu1) -Conflicts: python-librabbitmq Description: MAAS server provisioning libraries This package provides the MAAS provisioning server python libraries. @@ -154,8 +154,6 @@ maas-cli (=${binary:Version}), maas-common (=${binary:Version}), maas-dhcp (=${binary:Version}), - python-amqp, - python-celery, python-httplib2, python-lockfile, python-maas-provisioningserver (=${binary:Version}), @@ -165,6 +163,7 @@ python-twisted, python-zope.interface, rsyslog, + pxelinux | syslinux-common (<< 3:6.00~pre4+dfsg-5), syslinux-common, tgt, ubuntu-cloudimage-keyring, @@ -173,7 +172,7 @@ ${misc:Depends}, ${python:Depends} Suggests: ipmitool, libvirt-bin, amtterm -Conflicts: tftpd-hpa, python-librabbitmq +Conflicts: tftpd-hpa Breaks: maas ( <= 0.1+bzr1048+dfsg-0ubuntu1 ), maas-region-controller ( <= 0.1+bzr1314+dfsg-0ubuntu1 ) Replaces: maas ( <= 0.1+bzr1048+dfsg-0ubuntu1 ), maas-region-controller ( <= 0.1+bzr1314+dfsg-0ubuntu1 ) Description: MAAS server cluster controller @@ -185,6 +184,8 @@ Section: python Architecture: all Depends: python-amqp, + python-apt, + python-bson, python-celery, python-convoy, python-crochet, @@ -193,21 +194,14 @@ python-lxml, 
python-maas-client (= ${binary:Version}), python-netaddr, - python-oops, - python-oops-amqp, - python-oops-datedir-repo, - python-oops-twisted, - python-oops-wsgi, python-psycopg2, python-sphinx, python-tempita, python-twisted, python-txamqp, - python-txlongpoll, python-zope.interface, ${misc:Depends}, ${python:Depends} -Conflicts: python-librabbitmq Description: MAAS server Django web framework This package provides the Django web framework for MAAS. @@ -224,7 +218,6 @@ Architecture: all Depends: bind9, maas-region-controller-min (= ${binary:Version}), - python-iscpy, ${misc:Depends} Conflicts: dnsmasq Breaks: maas-dhcp (<= 0.1+bzr777+dfsg-0ubuntu1) @@ -232,3 +225,14 @@ Description: MAAS DNS server This package installs and configures a DNS server that can be used by MAAS, and enhances the overall MAAS user experience. + +Package: maas-proxy +Architecture: all +Depends: ${python:Depends}, + ${misc:Depends}, + squid3 +Conflicts: squid-deb-proxy +Replaces: squid-deb-proxy +Description: MAAS Caching Proxy + This package installs and configures a Caching Proxy server that can be + used by MAAS. It enhances the overall MAAS user experience. 
diff -Nru maas-1.5.4+bzr2294/debian/extras/99-maas-sudoers maas-1.7.6+bzr3376/debian/extras/99-maas-sudoers --- maas-1.5.4+bzr2294/debian/extras/99-maas-sudoers 2014-09-19 20:32:28.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/extras/99-maas-sudoers 2015-07-10 01:27:33.000000000 +0000 @@ -1,4 +1,6 @@ -maas ALL= NOPASSWD: /usr/sbin/service maas-dhcp-server restart -maas ALL= NOPASSWD: /usr/sbin/service maas-dhcp-server stop +maas ALL= NOPASSWD: /usr/sbin/service maas-dhcpd restart +maas ALL= NOPASSWD: /usr/sbin/service maas-dhcpd6 restart +maas ALL= NOPASSWD: /usr/sbin/service maas-dhcpd stop +maas ALL= NOPASSWD: /usr/sbin/service maas-dhcpd6 stop maas ALL= NOPASSWD: /usr/sbin/maas-provision -maas ALL= NOPASSWD: SETENV: /usr/sbin/maas-import-pxe-files +maas ALL= NOPASSWD: SETENV: /usr/sbin/maas-import-pxe-files, /usr/sbin/tgt-admin, /usr/bin/uec2roottar diff -Nru maas-1.5.4+bzr2294/debian/extras/maas-proxy-common.sh maas-1.7.6+bzr3376/debian/extras/maas-proxy-common.sh --- maas-1.5.4+bzr2294/debian/extras/maas-proxy-common.sh 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/extras/maas-proxy-common.sh 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,34 @@ +pre_start() { + if [ -x /usr/sbin/squid ]; then + SQUID=/usr/sbin/squid + elif [ -x /usr/sbin/squid3 ]; then + SQUID=/usr/sbin/squid3 + else + echo "No squid binary found" + exit 1 + fi + + # ensure all cache dirs are there + install -d -o proxy -g proxy -m 750 /var/cache/maas-proxy/ + install -d -o proxy -g proxy -m 750 /var/log/maas/proxy/ + install -m 750 -o proxy -g proxy -d /var/spool/maas-proxy/ + if [ -d /var/log/maas/proxy ]; then + chown -R proxy:proxy /var/log/maas/proxy + fi + if [ ! 
-d /var/cache/maas-proxy/00 ]; then + $SQUID -z -N -f /etc/maas/maas-proxy.conf + fi +} + +# from the squid3 debian init script +find_cache_dir () { + w=" " # space tab + res=`sed -ne ' + s/^'$1'['"$w"']\+[^'"$w"']\+['"$w"']\+\([^'"$w"']\+\).*$/\1/p; + t end; + d; + :end q' < $CONFIG` + [ -n "$res" ] || res=$2 + echo "$res" +} + diff -Nru maas-1.5.4+bzr2294/debian/extras/maas-proxy.conf maas-1.7.6+bzr3376/debian/extras/maas-proxy.conf --- maas-1.5.4+bzr2294/debian/extras/maas-proxy.conf 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/extras/maas-proxy.conf 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,37 @@ +# Inspired by UDS's conference proxy + +acl maas_proxy_manager proto cache_object +acl localhost src 127.0.0.1/32 ::1 +acl to_localhost dst 127.0.0.0/8 0.0.0.0/32 ::1 +acl localnet src all # TODO: We should auto-generate this with the networks MAAS manages/knows about. +acl SSL_ports port 443 +acl Safe_ports port 80 # http +acl Safe_ports port 21 # ftp +acl Safe_ports port 443 # https +acl Safe_ports port 1025-65535 # unregistered ports +acl CONNECT method CONNECT +http_access allow maas_proxy_manager localhost +http_access deny maas_proxy_manager +http_access deny !Safe_ports +http_access deny CONNECT !SSL_ports +http_access allow localnet +http_access allow localhost +http_access deny all +http_port 3128 transparent +http_port 8000 +coredump_dir /var/spool/maas-proxy +refresh_pattern ^ftp: 1440 20% 10080 +refresh_pattern -i (/cgi-bin/|\?) 0 0% 0 +refresh_pattern (Release|Packages(.gz)*)$ 0 20% 2880 +refresh_pattern . 0 20% 4320 +forwarded_for delete +visible_hostname maas.proxy # TODO: This should be autogenerated by a template. 
+cache_mem 512 MB +minimum_object_size 0 MB +maximum_object_size 1024 MB +maximum_object_size_in_memory 100 MB +cache_dir aufs /var/spool/maas-proxy 40000 16 256 +# use different logs +cache_access_log /var/log/maas/proxy/access.log +cache_log /var/log/maas/proxy/cache.log +cache_store_log /var/log/maas/proxy/store.log diff -Nru maas-1.5.4+bzr2294/debian/extras/maas-region-admin maas-1.7.6+bzr3376/debian/extras/maas-region-admin --- maas-1.5.4+bzr2294/debian/extras/maas-region-admin 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/extras/maas-region-admin 2015-07-10 01:27:33.000000000 +0000 @@ -1,8 +1,23 @@ -#!/bin/sh -if [ "$(id -u)" != "0" ]; then - echo "This utility may only be run as root." 1>&2 - exit 1 -fi -export PYTHONPATH="/usr/share/maas${PYTHONPATH:+:}${PYTHONPATH}" -export DJANGO_SETTINGS_MODULE="maas.settings" -exec /usr/bin/django-admin "$@" +#!/usr/bin/python +import os +import sys + +user_id = os.getuid() +if user_id != 0: + print("This utility may only be run as root.") + sys.exit(1) + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "maas.settings") +sys.path.append('/usr/share/maas') + +# Use Django 1.6 if the python-django16 package is installed: this is +# to get MAAS to work on vivid: vivid ships with Django 1.7 by default +# and MAAS isn't yet compatible with Django 1.7. 
+if os.path.exists('/usr/lib/django16'): + sys.path.insert(1, '/usr/lib/django16') + + +from django.core import management + +if __name__ == "__main__": + management.execute_from_command_line() diff -Nru maas-1.5.4+bzr2294/debian/extras/maas-region-celeryd maas-1.7.6+bzr3376/debian/extras/maas-region-celeryd --- maas-1.5.4+bzr2294/debian/extras/maas-region-celeryd 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/extras/maas-region-celeryd 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -#!/usr/bin/python -import os -from grp import getgrnam -from pwd import getpwnam - -def start_celery(args): - uid = getpwnam(args.user).pw_uid - gid = getgrnam(args.group).gr_gid - - env = dict(os.environ, PYTHONPATH="/usr/share/maas") - - command = [ - 'celeryd', - '--logfile=%s' % args.logfile, - '--schedule=%s' % args.schedule, - '--loglevel=INFO', - '--beat', - '--queues=celery,master', - ] - - # Change gid first, just in case changing the uid might deprive - # us of the privileges required to setgid. 
- os.setgid(gid) - os.setuid(uid) - - os.execvpe(command[0], command, env=env) - -def main(): - import argparse - parser = argparse.ArgumentParser( - description='MAAS celery daemon config options') - parser.add_argument( - '--user', '-u', metavar='USER', default='maas', - help="System user identity that should run the cluster controller.") - parser.add_argument( - '--group', '-g', metavar='GROUP', default='maas', - help="System group that should run the cluster controller.") - parser.add_argument( - '--logfile', '-l', metavar='LOGFILE', default='/var/log/maas/celery-region.log', - help="Location of the logfile.") - parser.add_argument( - '--schedule', '-s', metavar='SCHEDULE', default='/var/lib/maas/celerybeat-region-schedule', - help="Location of the beat schedule file.") - - args = args = parser.parse_args() - - start_celery(args) - -if __name__ == '__main__': - main() diff -Nru maas-1.5.4+bzr2294/debian/extras/squid3.override maas-1.7.6+bzr3376/debian/extras/squid3.override --- maas-1.5.4+bzr2294/debian/extras/squid3.override 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/extras/squid3.override 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1 @@ +manual diff -Nru maas-1.5.4+bzr2294/debian/maas-cluster-controller.install maas-1.7.6+bzr3376/debian/maas-cluster-controller.install --- maas-1.5.4+bzr2294/debian/maas-cluster-controller.install 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-cluster-controller.install 2015-07-10 01:27:33.000000000 +0000 @@ -1,19 +1,12 @@ # Install twisted plugins twisted/plugins/maasps.py usr/share/pyshared/twisted/plugins -# Install PSERV config files +# Install PSERV config file debian/tmp/etc/maas/pserv.yaml -debian/tmp/etc/maas/bootresources.yaml # Install cluster config file debian/tmp/etc/maas/maas_cluster.conf -# Install celery config file -debian/tmp/usr/share/maas/celeryconfig_cluster.py - -# Install local celery cluster config file 
-debian/tmp/etc/maas/maas_local_celeryconfig_cluster.py - # Install templates debian/tmp/etc/maas/templates/dhcp debian/tmp/etc/maas/templates/power diff -Nru maas-1.5.4+bzr2294/debian/maas-cluster-controller.links maas-1.7.6+bzr3376/debian/maas-cluster-controller.links --- maas-1.5.4+bzr2294/debian/maas-cluster-controller.links 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-cluster-controller.links 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -etc/maas/maas_local_celeryconfig_cluster.py usr/share/maas/maas_local_celeryconfig_cluster.py diff -Nru maas-1.5.4+bzr2294/debian/maas-cluster-controller.logrotate maas-1.7.6+bzr3376/debian/maas-cluster-controller.logrotate --- maas-1.5.4+bzr2294/debian/maas-cluster-controller.logrotate 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-cluster-controller.logrotate 2015-07-10 01:27:33.000000000 +0000 @@ -4,6 +4,6 @@ compress missingok postrotate - [ -r /var/run/maas-pserv.pid ] && kill -s USR2 $(cat /var/run/maas-pserv.pid) || true + [ -r /var/run/maas-cluster.pid ] && kill -s USR2 $(cat /var/run/maas-cluster.pid) || true endscript } diff -Nru maas-1.5.4+bzr2294/debian/maas-cluster-controller.maas-cluster-celery.upstart maas-1.7.6+bzr3376/debian/maas-cluster-controller.maas-cluster-celery.upstart --- maas-1.5.4+bzr2294/debian/maas-cluster-controller.maas-cluster-celery.upstart 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-cluster-controller.maas-cluster-celery.upstart 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -# maas-cluster-controller - provisioning service -# -# MAAS Cluster Controller Service - -description "MAAS Cluster Controller" -author "Julian Edwards " - -start on filesystem and net-device-up -stop on runlevel [016] - -env CONFIG_FILE=/etc/maas/maas_cluster.conf -# Use cluster config. -env CELERY_CONFIG_MODULE="celeryconfig_cluster" - -pre-start script - if [ ! -f $CONFIG_FILE ]; then - echo "$CONFIG_FILE does not exist. Aborting." 
- stop - exit 0 - fi -end script - -script - # Prepare settings. - . $CONFIG_FILE - # Allow the cluster-controller process to read CLUSTER_UUID as set - # in that config file. - export CLUSTER_UUID - exec /usr/bin/authbind --deep /usr/sbin/maas-provision start-cluster-controller $MAAS_URL -u maas -g maas -end script diff -Nru maas-1.5.4+bzr2294/debian/maas-cluster-controller.maas-clusterd.service maas-1.7.6+bzr3376/debian/maas-cluster-controller.maas-clusterd.service --- maas-1.5.4+bzr2294/debian/maas-cluster-controller.maas-clusterd.service 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-cluster-controller.maas-clusterd.service 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,21 @@ +[Unit] +Description=MAAS Cluster Controller +Requires=network-online.target +After=network-online.target +ConditionPathExists=/etc/maas/maas_cluster.conf +# This (pserv.yaml) will go away once streamline +# config files or move to sqlite. +ConditionPathExists=/etc/maas/pserv.yaml +ConditionPathExists=/var/lib/maas/secret + +[Service] +Restart=on-failure +EnvironmentFile=/etc/maas/maas_cluster.conf +ExecStart=/bin/sh -ec '\ + exec /usr/bin/authbind --deep /usr/bin/twistd \ + --nodaemon --uid=maas --gid=maas --pidfile=/run/maas-cluster.pid \ + maas-pserv --config-file=/etc/maas/pserv.yaml > \ + /var/log/maas/clusterd.log 2>&1' + +[Install] +WantedBy=multi-user.target diff -Nru maas-1.5.4+bzr2294/debian/maas-cluster-controller.maas-clusterd.upstart maas-1.7.6+bzr3376/debian/maas-cluster-controller.maas-clusterd.upstart --- maas-1.5.4+bzr2294/debian/maas-cluster-controller.maas-clusterd.upstart 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-cluster-controller.maas-clusterd.upstart 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,55 @@ +# maas-cluster - provisioning service +# +# MAAS Provisioning Service + +description "MAAS" +author "Andres Rodriguez " + +start on filesystem and net-device-up +stop on runlevel [016] + +respawn + +env 
CONFIG_FILE=/etc/maas/maas_cluster.conf + +pre-start script + if [ ! -f $CONFIG_FILE ]; then + echo "$CONFIG_FILE does not exist. Aborting." + stop + exit 0 + fi +end script + +script + # Exit immediately on error, and treat unset variables as errors. In + # sh/dash, unfortunately, the following does not cause the use of an unset + # variable to halt the script, it merely prints an error and sets $?. + set -o errexit -o nounset + # Load the configuration file. + . $CONFIG_FILE + # The MAAS cluster controller needs both CLUSTER_UUID and MAAS_URL to + # operate. + export CLUSTER_UUID + export MAAS_URL + # Check for the shared-secret. If it's not here, sleep for a while, then + # exit and allow respawn to do its thing. + if ! maas-provision check-for-shared-secret >/dev/null; then + fmt -w 72 <<-'EOF' >&2 + A shared secret has not been installed for this cluster. Obtain + the secret from the region (find it in /var/lib/maas/secret once + the region controller has started for the first time) and + install it with `maas-provision install-shared-secret`. + + However, if this machine is also serving as the region, ensure + that the region controller is started (`sudo service apache2 + start` typically). It will create the shared secret, which + maas-clusterd will then find when it respawns in 5 seconds. 
+ + EOF + exec sleep 5 + fi + # To add options to your daemon, edit the line below: + exec /usr/bin/authbind --deep /usr/bin/twistd \ + --nodaemon --uid=maas --gid=maas --pidfile=/run/maas-cluster.pid \ + --logfile=/dev/null maas-pserv --config-file=/etc/maas/pserv.yaml +end script diff -Nru maas-1.5.4+bzr2294/debian/maas-cluster-controller.maas-pserv.upstart maas-1.7.6+bzr3376/debian/maas-cluster-controller.maas-pserv.upstart --- maas-1.5.4+bzr2294/debian/maas-cluster-controller.maas-pserv.upstart 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-cluster-controller.maas-pserv.upstart 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -# maas-pserv - provisioning service -# -# MAAS Provisioning Service - -description "MAAS" -author "Andres Rodriguez " - -start on filesystem and net-device-up -stop on runlevel [016] - -respawn - -env CONFIG_FILE=/etc/maas/maas_cluster.conf - -pre-start script - if [ ! -f $CONFIG_FILE ]; then - echo "$CONFIG_FILE does not exist. Aborting." - stop - exit 0 - fi -end script - -script - # Prepare settings. - . $CONFIG_FILE - # Allow the tftpd process to read CLUSTER_UUID as set in that config - # file. 
- export CLUSTER_UUID - export MAAS_URL - # To add options to your daemon, edit the line below: - exec /usr/bin/authbind --deep /usr/bin/twistd -n --uid=maas --gid=maas --pidfile=/run/maas-pserv.pid --logfile=/dev/null maas-pserv --config-file=/etc/maas/pserv.yaml -end script diff -Nru maas-1.5.4+bzr2294/debian/maas-cluster-controller.maintscript maas-1.7.6+bzr3376/debian/maas-cluster-controller.maintscript --- maas-1.5.4+bzr2294/debian/maas-cluster-controller.maintscript 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-cluster-controller.maintscript 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,6 @@ +rm_conffile /etc/maas/bootresources.yaml 1.7.0~beta1+bzr2781-0ubuntu1 +rm_conffile /etc/maas/maas_local_celeryconfig_cluster.py 1.7.0~beta3+bzr3043-0ubuntu1 +rm_conffile /etc/init/maas-cluster-celery.conf 1.7.0~beta3+bzr3043-0ubuntu1 +rm_conffile /etc/init/maas-pserv.conf 1.7.0~beta3+bzr3043-0ubuntu1 +rm_conffile /etc/init/maas-cluster.conf 1.7.0~beta6+bzr3231-0ubuntu1 +rm_conffile /etc/init/maas-cluster-register.conf 1.7.0~beta6+bzr3231-0ubuntu1 diff -Nru maas-1.5.4+bzr2294/debian/maas-cluster-controller.postinst maas-1.7.6+bzr3376/debian/maas-cluster-controller.postinst --- maas-1.5.4+bzr2294/debian/maas-cluster-controller.postinst 2015-01-09 11:49:25.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-cluster-controller.postinst 2015-07-10 01:27:33.000000000 +0000 @@ -15,10 +15,20 @@ if [ ! -d /var/log/maas/oops ]; then mkdir -p /var/log/maas/oops fi + # Main syslog file. + if [ ! 
-f /var/log/maas/maas.log ]; then + touch /var/log/maas/maas.log + fi + # Give appropriate permissions chown -R maas:maas /var/lib/maas/ find /var/log/maas -not -user syslog -print0 | xargs -r0 chown maas:maas chmod -R 775 /var/log/maas/oops + chown syslog:syslog /var/log/maas/maas.log + # If proxy log dir exists, set correct permissions + if [ -d /var/log/maas/proxy ]; then + chown -R proxy:proxy /var/log/maas/proxy + fi } configure_maas_tgt() { @@ -31,40 +41,26 @@ extract_cluster_uuid(){ # Extract ClUSTER_UUID setting from config file $1. This will work - # both the cluster celery config (which is python) and the cluster - # config (which is shell). + # the cluster config (which is shell). sed -n -e "s/^CLUSTER_UUID *= *[\"']\([^\"']*\).*/\1/p" "$1" } configure_cluster_uuid(){ - # The cluster uuid goes into maas_cluster.conf, but we also still - # keep a copy in maas_local_celeryconfig_cluster.py (hopefully just - # temporarily). If an old uuid is configured, we replicate that to - # maas_cluster.conf; otherwise, we want to generate one. + # The cluster uuid goes into maas_cluster.conf. If an old uuid is + # configured, we replicate that to maas_cluster.conf; otherwise, + # we want to generate one. local uuid - if [ -n "$(extract_cluster_uuid /etc/maas/maas_cluster.conf)" ]; then # UUID is already set up. Wonderful. return - fi - - # Look for a UUID stored in the old location. - uuid="$(extract_cluster_uuid /etc/maas/maas_local_celeryconfig_cluster.py)" - - if [ -z "$uuid" ]; then - # No UUID at all yet. Generate one, and insert it into its - # placeholder in the old config location. + else + # No UUID at all yet. Generate one. uuid="$(uuidgen)" - sed -i "s|^CLUSTER_UUID = None$|CLUSTER_UUID = '$uuid'|" \ - /etc/maas/maas_local_celeryconfig_cluster.py fi - # Either way, at this point we have a uuid, and it is configured in - # the old config location. - # - # Write it to maas_cluster.conf as well. 
There is no initial - # placeholder in this file, so just append the setting. + # Write the uuid to maas_cluster.conf + # There is no initial placeholder in this file, so just append the setting. echo "CLUSTER_UUID=\"$uuid\"" >>/etc/maas/maas_cluster.conf } @@ -116,19 +112,22 @@ fi } -configure_pserv_generator(){ +configure_maas_url(){ # Get the MAAS_URL on configure/reconfigure and write it to the conf files. db_get maas-cluster-controller/maas-url || true if [ -n "$RET" ]; then - sed -i "s|MAAS_URL=.*|MAAS_URL=\"$RET\"|" /etc/maas/maas_cluster.conf - # Extract the hostname part. - HOSTPART=$(echo $RET|awk '{ split($0,array,"/")} END{print array[3] }') - # And substitute it in-place in pserv.yaml on an indented, non-commented - # line. - sed -ri "s|^([[:space:]]+)(#+[[:space:]]*)?(generator:[[:space:]]+https?://)[^:/]+|\1\3$HOSTPART|" /etc/maas/pserv.yaml + maas-provision configure-maas-url "$RET" fi } +configure_shared_secret() { + db_get maas-cluster-controller/shared-secret || true + if [ -n "$RET" ]; then + echo "$RET" | maas-provision install-shared-secret + chown maas:maas /var/lib/maas/secret + chmod 0640 /var/lib/maas/secret + fi +} if [ "$1" = "configure" ] && [ -z "$2" ]; then @@ -142,16 +141,19 @@ fi configure_maas_tgt - configure_pserv_generator - - # These config files may contain a private cluster UUID. Only maas - # can read them; only root can write them - chown root:maas \ - /etc/maas/maas_local_celeryconfig_cluster.py \ - /etc/maas/maas_cluster.conf - chmod 0640 \ - /etc/maas/maas_local_celeryconfig_cluster.py \ - /etc/maas/maas_cluster.conf + configure_maas_url + # Only ask for a shared secret when the region is not installed + # on the same system. + if [ -n "$DEBCONF_RECONFIGURE" ] && [ ! -f /usr/sbin/maas-region-admin ]; then + db_input high maas-cluster-controller/shared-secret + db_go + fi + configure_shared_secret + + # This config file may contain a private cluster UUID. Only maas + # can read it; only root can write it. 
+ chown root:maas /etc/maas/maas_cluster.conf + chmod 0640 /etc/maas/maas_cluster.conf configure_cluster_uuid configure_cluster_authbind diff -Nru maas-1.5.4+bzr2294/debian/maas-cluster-controller.postrm maas-1.7.6+bzr3376/debian/maas-cluster-controller.postrm --- maas-1.5.4+bzr2294/debian/maas-cluster-controller.postrm 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-cluster-controller.postrm 2015-07-10 01:27:33.000000000 +0000 @@ -24,7 +24,6 @@ ;; esac # remove var directory - rm -rf /var/lib/maas/celerybeat-cluster-schedule DIR=/var/lib/maas if [ "$(ls -A $DIR 2> /dev/null)" = "" ]; then rm -rf /var/lib/maas diff -Nru maas-1.5.4+bzr2294/debian/maas-cluster-controller.preinst maas-1.7.6+bzr3376/debian/maas-cluster-controller.preinst --- maas-1.5.4+bzr2294/debian/maas-cluster-controller.preinst 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-cluster-controller.preinst 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,31 @@ +#!/bin/sh + +set -e + +. 
/usr/share/debconf/confmodule + +if [ "$1" = "upgrade" ]; then + + if dpkg --compare-versions "$2" lt 1.7.0~beta3+bzr3066-0ubuntu1; then + if [ -f /etc/init/maas-pserv.conf ]; then + invoke-rc.d maas-pserv stop + fi + + if [ -f /etc/init/maas-cluster-celery.conf ]; then + invoke-rc.d maas-cluster-celery stop + fi + + if [ -f /var/log/maas/celery.log ]; then + rm -rf /var/log/maas/celery.log + fi + fi + + if dpkg --compare-versions "$2" lt 1.7.0~beta6+bzr3232-0ubuntu1; then + if [ -f /etc/init/maas-cluster.conf ]; then + invoke-rc.d maas-cluster stop + fi + fi + +fi + +#DEBHELPER# diff -Nru maas-1.5.4+bzr2294/debian/maas-cluster-controller.templates maas-1.7.6+bzr3376/debian/maas-cluster-controller.templates --- maas-1.5.4+bzr2294/debian/maas-cluster-controller.templates 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-cluster-controller.templates 2015-07-10 01:27:33.000000000 +0000 @@ -2,6 +2,15 @@ Type: string Default: _Description: Ubuntu MAAS API address: - The MAAS Cluster Controller needs to contact the MAAS server to - register its presence. Set the URL to the MAAS API here, e.g. - http://192.168.1.1/MAAS + The MAAS cluster controller and nodes need to contact the MAAS region + controller API. Set the URL at which they can reach the MAAS API remotely, + e.g. "http://192.168.1.1/MAAS". + Since nodes must be able to access this URL, localhost or 127.0.0.1 are not + useful values here. + +Template: maas-cluster-controller/shared-secret +Type: password +Default: +_Description: MAAS Cluster Controller Shared Secret: + The MAAS Cluster Controller needs to contact the MAAS server + with a shared secret. Set the shared secret here. 
diff -Nru maas-1.5.4+bzr2294/debian/maas-common.install maas-1.7.6+bzr3376/debian/maas-common.install --- maas-1.5.4+bzr2294/debian/maas-common.install 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-common.install 2015-07-10 01:27:33.000000000 +0000 @@ -1,2 +1 @@ -# Install Celery Config -debian/tmp/usr/share/maas/celeryconfig_common.py +debian/tmp/usr/share/maas/maas-rsyslog.conf diff -Nru maas-1.5.4+bzr2294/debian/maas-common.maintscript maas-1.7.6+bzr3376/debian/maas-common.maintscript --- maas-1.5.4+bzr2294/debian/maas-common.maintscript 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-common.maintscript 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,2 @@ +#TODO: Figure out the right version or whether this is needed. +#rm_conffile /usr/share/maas/celeryconfig_common.py 1.7.0~beta3+bzr3043-0ubuntu1 diff -Nru maas-1.5.4+bzr2294/debian/maas-common.postinst maas-1.7.6+bzr3376/debian/maas-common.postinst --- maas-1.5.4+bzr2294/debian/maas-common.postinst 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-common.postinst 2015-07-10 01:27:33.000000000 +0000 @@ -5,10 +5,13 @@ add_user_group(){ local user="maas" local group="maas" + local home="/var/lib/maas" addgroup --quiet --system "$group" || true - adduser --quiet --system --group --no-create-home "$user" || true + adduser --quiet --system --group --home "$home" "$user" || true } +ln -sf /usr/share/maas/maas-rsyslog.conf /etc/rsyslog.d/99-maas.conf + add_user_group #DEBHELPER# diff -Nru maas-1.5.4+bzr2294/debian/maas-common.postrm maas-1.7.6+bzr3376/debian/maas-common.postrm --- maas-1.5.4+bzr2294/debian/maas-common.postrm 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-common.postrm 2015-07-10 01:27:33.000000000 +0000 @@ -11,4 +11,7 @@ fi esac +rm -f /etc/rsyslog.d/99-maas.conf + + #DEBHELPER# diff -Nru maas-1.5.4+bzr2294/debian/maas-dhcp.apparmor maas-1.7.6+bzr3376/debian/maas-dhcp.apparmor --- 
maas-1.5.4+bzr2294/debian/maas-dhcp.apparmor 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-dhcp.apparmor 2015-07-10 01:27:33.000000000 +0000 @@ -1,3 +1,7 @@ +# Work around bug: +# https://bugs.launchpad.net/ubuntu/+source/isc-dhcp/+bug/1186662 +capability dac_override, + /run/maas/dhcp/ r, /run/maas/dhcp/** r, /run/maas/dhcp/*.pid lrw, @@ -5,3 +9,4 @@ /run/maas/dhcp/*.leases* lrw, /var/lib/maas/dhcp/dhcpd*.leases* lrw, /etc/maas/dhcpd.conf r, +/etc/maas/dhcpd6.conf r, diff -Nru maas-1.5.4+bzr2294/debian/maas-dhcp.maas-dhcpd6.service maas-1.7.6+bzr3376/debian/maas-dhcp.maas-dhcpd6.service --- maas-1.5.4+bzr2294/debian/maas-dhcp.maas-dhcpd6.service 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-dhcp.maas-dhcpd6.service 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,27 @@ +[Unit] +Description=MAAS instance of ISC DHCP server for IPv6 +Documentation=man:dhcpd(8) +Wants=network-online.target +After=network-online.target +After=time-sync.target +ConditionPathExists=/etc/maas/dhcpd6.conf +ConditionPathExists=/var/lib/maas/dhcpd-interfaces + +[Service] +# Allow dhcp server to write lease and pid file as 'dhcpd' user +ExecStartPre=/bin/mkdir -p /run/maas/dhcp +ExecStartPre=/bin/chown root:root /run/maas/dhcp +# The leases files need to be root:root even when dropping privileges +ExecStartPre=/bin/mkdir -p /var/lib/maas/dhcp +ExecStartPre=/bin/chown root:root /var/lib/maas/dhcp +# Start the daemon +ExecStart=/bin/sh -ec '\ + INTERFACES=$(cat /var/lib/maas/dhcpd-interfaces); \ + LEASES_FILE=/var/lib/maas/dhcp/dhcpd6.leases; \ + [ -e $LEASES_FILE ] || touch $LEASES_FILE; \ + chown root:root /var/lib/maas/dhcp /var/lib/maas/dhcp/dhcpd6.leases*; \ + exec dhcpd -user dhcpd -group dhcpd -f -6 -pf /run/maas/dhcp/dhcpd6.pid \ + -cf /etc/maas/dhcpd6.conf -lf $LEASES_FILE $INTERFACES' + +[Install] +WantedBy=multi-user.target diff -Nru maas-1.5.4+bzr2294/debian/maas-dhcp.maas-dhcpd6.upstart 
maas-1.7.6+bzr3376/debian/maas-dhcp.maas-dhcpd6.upstart --- maas-1.5.4+bzr2294/debian/maas-dhcp.maas-dhcpd6.upstart 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-dhcp.maas-dhcpd6.upstart 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,65 @@ +description "MAAS instance of ISC DHCP server for IPv6" +author "Jeroen Vermeulen " + +start on runlevel [2345] +stop on runlevel [!2345] + +env CONFIG_FILE=/etc/maas/dhcpd6.conf +env PID_DIR=/run/maas/dhcp +env PID_FILE=/run/maas/dhcp/dhcpd6.pid +env LEASES_DIR=/var/lib/maas/dhcp +env LEASES_FILE=/var/lib/maas/dhcp/dhcpd6.leases + +# This is where we write what interfaces dhcpd should listen on. +env INTERFACES_FILE=/var/lib/maas/dhcpd6-interfaces + +pre-start script + if [ ! -f $CONFIG_FILE ]; then + echo "$CONFIG_FILE does not exist. Aborting." + stop + exit 0 + fi + + if [ ! -f $INTERFACES_FILE ]; then + echo "$INTERFACES_FILE does not exist. Aborting." + stop + exit 0 + fi + + if ! /usr/sbin/dhcpd -t -q -6 -cf $CONFIG_FILE > /dev/null 2>&1; then + echo "dhcpd self-test failed. Please fix the config file." + echo "The error was: " + /usr/sbin/dhcpd -t -6 -cf $CONFIG_FILE + stop + exit 0 + fi +end script + +respawn +script + INTERFACES=`cat "${INTERFACES_FILE}"` + + # Allow dhcp server to write lease and pid file. + mkdir -p $PID_DIR + chown dhcpd:dhcpd $PID_DIR + + # As of Quantal, the leases file must be owned by root:root (even though + # the daemon will run under an unprivileged user). + # In Precise, ownership was supposed to be dhcpd:dhcpd. + # + # maas packages on saucy are only supported with newer isc-dhcp via + # the cloud-archive. See bug 1231693 for more information, including + # a patch that would actually work to support all, but is complex. 
+ + mkdir -p $LEASES_DIR + chown root:root $LEASES_DIR + [ -e $LEASES_FILE ] || touch $LEASES_FILE + for LFILE in $LEASES_FILE $LEASES_FILE~; do + if [ -e $LFILE ]; then + chown root:root $LFILE + chmod a+r $LFILE + fi + done + + exec /usr/sbin/dhcpd -user dhcpd -group dhcpd -f -q -6 -pf $PID_FILE -cf $CONFIG_FILE -lf $LEASES_FILE $INTERFACES +end script diff -Nru maas-1.5.4+bzr2294/debian/maas-dhcp.maas-dhcpd.service maas-1.7.6+bzr3376/debian/maas-dhcp.maas-dhcpd.service --- maas-1.5.4+bzr2294/debian/maas-dhcp.maas-dhcpd.service 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-dhcp.maas-dhcpd.service 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,27 @@ +[Unit] +Description=MAAS instance of ISC DHCP server for IPv4 +Documentation=man:dhcpd(8) +Wants=network-online.target +After=network-online.target +After=time-sync.target +ConditionPathExists=/etc/maas/dhcpd.conf +ConditionPathExists=/var/lib/maas/dhcpd-interfaces + +[Service] +# Allow dhcp server to write lease and pid file as 'dhcpd' user +ExecStartPre=/bin/mkdir -p /run/maas/dhcp +ExecStartPre=/bin/chown root:root /run/maas/dhcp +# The leases files need to be root:root even when dropping privileges +ExecStartPre=/bin/mkdir -p /var/lib/maas/dhcp +ExecStartPre=/bin/chown root:root /var/lib/maas/dhcp +# Start the daemon +ExecStart=/bin/sh -ec '\ + INTERFACES=$(cat /var/lib/maas/dhcpd-interfaces); \ + LEASES_FILE=/var/lib/maas/dhcp/dhcpd.leases; \ + [ -e $LEASES_FILE ] || touch $LEASES_FILE; \ + chown root:root /var/lib/maas/dhcp /var/lib/maas/dhcp/dhcpd.leases*; \ + exec dhcpd -user dhcpd -group dhcpd -f -q -4 -pf /run/maas/dhcp/dhcpd.pid \ + -cf /etc/maas/dhcpd.conf -lf $LEASES_FILE $INTERFACES' + +[Install] +WantedBy=multi-user.target diff -Nru maas-1.5.4+bzr2294/debian/maas-dhcp.maas-dhcpd.upstart maas-1.7.6+bzr3376/debian/maas-dhcp.maas-dhcpd.upstart --- maas-1.5.4+bzr2294/debian/maas-dhcp.maas-dhcpd.upstart 1970-01-01 00:00:00.000000000 +0000 +++ 
maas-1.7.6+bzr3376/debian/maas-dhcp.maas-dhcpd.upstart 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,65 @@ +description "MAAS instance of ISC DHCP server for IPv4" +author "Jeroen Vermeulen " + +start on runlevel [2345] +stop on runlevel [!2345] + +env CONFIG_FILE=/etc/maas/dhcpd.conf +env PID_DIR=/run/maas/dhcp +env PID_FILE=/run/maas/dhcp/dhcpd.pid +env LEASES_DIR=/var/lib/maas/dhcp +env LEASES_FILE=/var/lib/maas/dhcp/dhcpd.leases + +# This is where we write what interfaces dhcpd should listen on. +env INTERFACES_FILE=/var/lib/maas/dhcpd-interfaces + +pre-start script + if [ ! -f $CONFIG_FILE ]; then + echo "$CONFIG_FILE does not exist. Aborting." + stop + exit 0 + fi + + if [ ! -f $INTERFACES_FILE ]; then + echo "$INTERFACES_FILE does not exist. Aborting." + stop + exit 0 + fi + + if ! /usr/sbin/dhcpd -t -q -4 -cf $CONFIG_FILE > /dev/null 2>&1; then + echo "dhcpd self-test failed. Please fix the config file." + echo "The error was: " + /usr/sbin/dhcpd -t -4 -cf $CONFIG_FILE + stop + exit 0 + fi +end script + +respawn +script + INTERFACES=`cat "${INTERFACES_FILE}"` + + # Allow dhcp server to write lease and pid file. + mkdir -p $PID_DIR + chown dhcpd:dhcpd $PID_DIR + + # As of Quantal, the leases file must be owned by root:root (even though + # the daemon will run under an unprivileged user). + # In Precise, ownership was supposed to be dhcpd:dhcpd. + # + # maas packages on saucy are only supported with newer isc-dhcp via + # the cloud-archive. See bug 1231693 for more information, including + # a patch that would actually work to support all, but is complex. 
+ + mkdir -p $LEASES_DIR + chown root:root $LEASES_DIR + [ -e $LEASES_FILE ] || touch $LEASES_FILE + for LFILE in $LEASES_FILE $LEASES_FILE~; do + if [ -e $LFILE ]; then + chown root:root $LFILE + chmod a+r $LFILE + fi + done + + exec /usr/sbin/dhcpd -user dhcpd -group dhcpd -f -q -4 -pf $PID_FILE -cf $CONFIG_FILE -lf $LEASES_FILE $INTERFACES +end script diff -Nru maas-1.5.4+bzr2294/debian/maas-dhcp.maas-dhcp-server.upstart maas-1.7.6+bzr3376/debian/maas-dhcp.maas-dhcp-server.upstart --- maas-1.5.4+bzr2294/debian/maas-dhcp.maas-dhcp-server.upstart 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-dhcp.maas-dhcp-server.upstart 1970-01-01 00:00:00.000000000 +0000 @@ -1,65 +0,0 @@ -description "MAAS instance of ISC DHCP server" -author "Jeroen Vermeulen " - -start on runlevel [2345] -stop on runlevel [!2345] - -env CONFIG_FILE=/etc/maas/dhcpd.conf -env PID_DIR=/run/maas/dhcp -env PID_FILE=/run/maas/dhcp/dhcpd.pid -env LEASES_DIR=/var/lib/maas/dhcp -env LEASES_FILE=/var/lib/maas/dhcp/dhcpd.leases - -# This is where we write what interfaces dhcpd should listen on. -env INTERFACES_FILE=/var/lib/maas/dhcpd-interfaces - -pre-start script - if [ ! -f $CONFIG_FILE ]; then - echo "$CONFIG_FILE does not exist. Aborting." - stop - exit 0 - fi - - if [ ! -f $INTERFACES_FILE ]; then - echo "$INTERFACES_FILE does not exist. Aborting." - stop - exit 0 - fi - - if ! /usr/sbin/dhcpd -t -q -4 -cf $CONFIG_FILE > /dev/null 2>&1; then - echo "dhcpd self-test failed. Please fix the config file." - echo "The error was: " - /usr/sbin/dhcpd -t -4 -cf $CONFIG_FILE - stop - exit 0 - fi -end script - -respawn -script - INTERFACES=`cat "${INTERFACES_FILE}"` - - # Allow dhcp server to write lease and pid file. - mkdir -p $PID_DIR - chown dhcpd:dhcpd $PID_DIR - - # As of Quantal, the leases file must be owned by root:root (even though - # the daemon will run under an unprivileged user). - # In Precise, ownership was supposed to be dhcpd:dhcpd. 
- # - # maas packages on saucy are only supported with newer isc-dhcp via - # the cloud-archive. See bug 1231693 for more information, including - # a patch that would actually work to support all, but is complex. - - mkdir -p $LEASES_DIR - chown root:root $LEASES_DIR - [ -e $LEASES_FILE ] || touch $LEASES_FILE - for LFILE in $LEASES_FILE $LEASES_FILE~; do - if [ -e $LFILE ]; then - chown root:root $LFILE - chmod a+r $LFILE - fi - done - - exec /usr/sbin/dhcpd -user dhcpd -group dhcpd -f -q -4 -pf $PID_FILE -cf $CONFIG_FILE -lf $LEASES_FILE $INTERFACES -end script diff -Nru maas-1.5.4+bzr2294/debian/maas-dhcp.maintscript maas-1.7.6+bzr3376/debian/maas-dhcp.maintscript --- maas-1.5.4+bzr2294/debian/maas-dhcp.maintscript 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-dhcp.maintscript 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,2 @@ +rm_conffile /etc/init/maas-dhcp-server.conf 1.7.0~beta6+bzr3231-0ubuntu1 +rm_conffile /etc/init/maas-dhcpv6-server.conf 1.7.0~beta6+bzr3231-0ubuntu1 diff -Nru maas-1.5.4+bzr2294/debian/maas-dhcp.postinst maas-1.7.6+bzr3376/debian/maas-dhcp.postinst --- maas-1.5.4+bzr2294/debian/maas-dhcp.postinst 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-dhcp.postinst 2015-07-10 01:27:33.000000000 +0000 @@ -2,14 +2,22 @@ set -e -if [ "$1" = "configure" ]; then - invoke-rc.d isc-dhcp-server stop +if [ "$1" = "configure" ] +then + # Stop the dhcpd instance that came with the isc-dhcp-server package. + # We run our own dhcpd instances. 
+ if [ -d /run/systemd/system ]; then + systemctl stop isc-dhcp-server >/dev/null || true + systemctl disable isc-dhcp-server >/dev/null || true + else + invoke-rc.d isc-dhcp-server stop + fi - dhcpd_prof="/etc/apparmor.d/usr.sbin.dhcpd" - if [ -f "${dhcpd_prof}" ] && command -v apparmor_parser >/dev/null 2>&1 - then - apparmor_parser --replace --write-cache --skip-read-cache "${dhcpd_prof}" || true - fi + dhcpd_prof="/etc/apparmor.d/usr.sbin.dhcpd" + if [ -f "${dhcpd_prof}" ] && command -v apparmor_parser >/dev/null 2>&1 + then + apparmor_parser --replace --write-cache --skip-read-cache "${dhcpd_prof}" || true + fi fi #DEBHELPER# diff -Nru maas-1.5.4+bzr2294/debian/maas-dhcp.postrm maas-1.7.6+bzr3376/debian/maas-dhcp.postrm --- maas-1.5.4+bzr2294/debian/maas-dhcp.postrm 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-dhcp.postrm 2015-07-10 01:27:33.000000000 +0000 @@ -8,6 +8,10 @@ command -v apparmor_parser >/dev/null 2>&1; then apparmor_parser --replace --write-cache --skip-read-cache "${dhcpd_prof}" fi + if [ "$1" = "purge" ]; then + rm -rf /etc/maas/dhcpd.conf + rm -rf /etc/maas/dhcpd6.conf + fi fi #DEBHELPER# diff -Nru maas-1.5.4+bzr2294/debian/maas-dhcp.preinst maas-1.7.6+bzr3376/debian/maas-dhcp.preinst --- maas-1.5.4+bzr2294/debian/maas-dhcp.preinst 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-dhcp.preinst 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,16 @@ +#!/bin/sh + +set -e + +. 
/usr/share/debconf/confmodule + +if [ "$1" = "upgrade" ] && dpkg --compare-versions "$2" lt 1.7.0~beta6+bzr3232-0ubuntu1; then + if [ -f /etc/init/maas-dhcp-server.conf ]; then + invoke-rc.d maas-dhcp-server stop + fi + if [ -f /etc/init/maas-dhcpv6-server.conf ]; then + invoke-rc.d maas-dhcpv6-server stop + fi +fi + +#DEBHELPER# diff -Nru maas-1.5.4+bzr2294/debian/maas-dns.postinst maas-1.7.6+bzr3376/debian/maas-dns.postinst --- maas-1.5.4+bzr2294/debian/maas-dns.postinst 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-dns.postinst 2015-07-10 01:27:33.000000000 +0000 @@ -31,16 +31,14 @@ # /etc/named/maas/named.conf.options.inside.maas file. maas-region-admin edit_named_options --config-path /etc/bind/named.conf.options - invoke-rc.d bind9 restart || true +elif [ "$1" = "configure" ]; then + # If the /etc/bind/named.conf.options is not set up yet, do it now. + # This might happen because of an upgrade from an older package. + if ! grep -qs "named.conf.options.inside.maas" /etc/bind/named.conf.options; then + maas-region-admin edit_named_options --config-path /etc/bind/named.conf.options + fi fi -if [ "$1" = "configure" ]; then - # If the /etc/bind/named.conf.options is not set up yet, do it now. - # This might happen because of an upgrade from an older package. - if ! 
grep -qs "named.conf.options.inside.maas" /etc/bind/named.conf.options; then - maas-region-admin edit_named_options --config-path /etc/bind/named.conf.options - invoke-rc.d bind9 restart || true - fi -fi +invoke-rc.d bind9 restart || true #DEBHELPER# diff -Nru maas-1.5.4+bzr2294/debian/maas-dns.postrm maas-1.7.6+bzr3376/debian/maas-dns.postrm --- maas-1.5.4+bzr2294/debian/maas-dns.postrm 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-dns.postrm 2015-07-10 01:27:33.000000000 +0000 @@ -6,6 +6,8 @@ if [ -f /etc/bind/named.conf.local ]; then sed -i '/^include.*\"\/etc\/bind\/maas\/named.conf.maas\"\;$/d' \ /etc/bind/named.conf.local + sed -i '/^include.*\"\/etc\/bind\/maas\/named.conf.options.inside.maas\"\;$/d' \ + /etc/bind/named.conf.options fi fi diff -Nru maas-1.5.4+bzr2294/debian/maas-proxy.dirs maas-1.7.6+bzr3376/debian/maas-proxy.dirs --- maas-1.5.4+bzr2294/debian/maas-proxy.dirs 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-proxy.dirs 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1 @@ +var/log/maas/proxy diff -Nru maas-1.5.4+bzr2294/debian/maas-proxy.install maas-1.7.6+bzr3376/debian/maas-proxy.install --- maas-1.5.4+bzr2294/debian/maas-proxy.install 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-proxy.install 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,3 @@ +debian/extras/maas-proxy.conf etc/maas/ +debian/extras/maas-proxy-common.sh usr/share/maas/ +debian/extras/squid3.override etc/init/ diff -Nru maas-1.5.4+bzr2294/debian/maas-proxy.logrotate maas-1.7.6+bzr3376/debian/maas-proxy.logrotate --- maas-1.5.4+bzr2294/debian/maas-proxy.logrotate 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-proxy.logrotate 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,24 @@ +# +# Logrotate fragment for squid-deb-proxy. 
+# +/var/log/maas/proxy/*.log { + daily + compress + delaycompress + rotate 2 + missingok + nocreate + sharedscripts + postrotate + if [ -x /usr/sbin/squid ]; then + SQUID=/usr/sbin/squid + elif [ -x /usr/sbin/squid3 ]; then + SQUID=/usr/sbin/squid3 + else + echo "No squid binary found" + exit 1 + fi + + test ! -e /var/run/maas-proxy.pid || $SQUID -f /etc/maas/maas-proxy.conf -k rotate + endscript +} diff -Nru maas-1.5.4+bzr2294/debian/maas-proxy.maas-proxy.service maas-1.7.6+bzr3376/debian/maas-proxy.maas-proxy.service --- maas-1.5.4+bzr2294/debian/maas-proxy.maas-proxy.service 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-proxy.maas-proxy.service 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,15 @@ +[Unit] +Description=MAAS Proxy +Requires=network-online.target +After=network-online.target +ConditionPathExists=/etc/maas/maas-proxy.conf + +[Service] +ExecStartPre=/bin/mkdir -p /run/maas/proxy +ExecStartPre=/bin/sh -ec '\ + . /usr/share/maas/maas-proxy-common.sh; \ + pre_start' +ExecStart=/usr/sbin/squid3 -N -f /etc/maas/maas-proxy.conf + +[Install] +WantedBy=multi-user.target diff -Nru maas-1.5.4+bzr2294/debian/maas-proxy.maas-proxy.upstart maas-1.7.6+bzr3376/debian/maas-proxy.maas-proxy.upstart --- maas-1.5.4+bzr2294/debian/maas-proxy.maas-proxy.upstart 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-proxy.maas-proxy.upstart 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,24 @@ +# maas-proxy - a caching proxy +# + +description "maas-proxy" + +start on (local-filesystems and net-device-up IFACE!=lo) +stop on runlevel [!2345] + +pre-start script + . 
/usr/share/maas/maas-proxy-common.sh + pre_start +end script + +script + if [ -x /usr/sbin/squid ]; then + SQUID=/usr/sbin/squid + elif [ -x /usr/sbin/squid3 ]; then + SQUID=/usr/sbin/squid3 + else + echo "No squid binary found" + exit 1 + fi + exec $SQUID -N -f /etc/maas/maas-proxy.conf +end script diff -Nru maas-1.5.4+bzr2294/debian/maas-proxy.postinst maas-1.7.6+bzr3376/debian/maas-proxy.postinst --- maas-1.5.4+bzr2294/debian/maas-proxy.postinst 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-proxy.postinst 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,22 @@ +#!/bin/sh + +set -e + +if [ "$1" = "configure" ] +then + # Stop the squid3 instance that came with the squid3 package. + # We run our own squid3 instances. + if [ -d /run/systemd/system ]; then + systemctl stop squid3 >/dev/null || true + systemctl disable squid3 >/dev/null || true + else + invoke-rc.d squid3 stop + fi + + # Ensure log folder is created. + mkdir -p /var/log/maas/proxy + chown -R proxy:proxy /var/log/maas/proxy + +fi + +#DEBHELPER# diff -Nru maas-1.5.4+bzr2294/debian/maas-proxy.postrm maas-1.7.6+bzr3376/debian/maas-proxy.postrm --- maas-1.5.4+bzr2294/debian/maas-proxy.postrm 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-proxy.postrm 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,12 @@ +#!/bin/sh + +set -e + +if [ "$1" = "purge" ]; then + rm -rf /var/cache/maas-proxy + rm -rf /var/log/maas/proxy + rm -rf /var/spool/maas-proxy + rm -f /etc/maas/maas-proxy.conf +fi + +#DEBHELPER# diff -Nru maas-1.5.4+bzr2294/debian/maas-region-controller-min.apport maas-1.7.6+bzr3376/debian/maas-region-controller-min.apport --- maas-1.5.4+bzr2294/debian/maas-region-controller-min.apport 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-region-controller-min.apport 2015-07-10 01:27:33.000000000 +0000 @@ -16,7 +16,7 @@ def add_info(report, ui): response = ui.yesno("The contents of your /etc/maas/maas_local_settings.py, " - 
"/etc/maas/txlongpoll.yaml, /etc/maas/pserv.yaml files " + "/etc/maas/pserv.yaml files " "may help developers diagnose your bug more " "quickly. However, it may contain sensitive " "information. Do you want to include it in your " @@ -29,9 +29,8 @@ attach_conffiles(report,'maas') # Attaching log files - attach_file_if_exists(report, '/var/log/maas/maas.log', 'MAASLog') + attach_file_if_exists(report, '/var/log/maas/maas-django.log', 'MAASLog') attach_file_if_exists(report, '/var/log/maas/pserv.log', 'MAASPservLog') - attach_file_if_exists(report, '/var/log/maas/txlongpoll.log', 'MAAStxlongpollLog') # Attaching related packages info attach_related_packages(report, ['python-django-maas', 'apparmor']) diff -Nru maas-1.5.4+bzr2294/debian/maas-region-controller-min.install maas-1.7.6+bzr3376/debian/maas-region-controller-min.install --- maas-1.5.4+bzr2294/debian/maas-region-controller-min.install 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-region-controller-min.install 2015-07-10 01:27:33.000000000 +0000 @@ -10,31 +10,25 @@ # Install MAAS local settings debian/tmp/etc/maas/maas_local_settings.py -# Install celery config file -debian/tmp/usr/share/maas/celeryconfig.py - -# Install local celery region config file -debian/tmp/etc/maas/maas_local_celeryconfig.py - # Install WSGI debian/tmp/usr/share/maas/wsgi.py -# Install txlongpoll config file -debian/tmp/etc/maas/txlongpoll.yaml - # Install new preseed files debian/tmp/etc/maas/preseeds # Install templates debian/tmp/etc/maas/templates/dns debian/tmp/etc/maas/templates/commissioning-user-data +debian/tmp/etc/maas/templates/deployment-user-data # Install driver configuration file debian/tmp/etc/maas/drivers.yaml +# Install winrm certificate generation script +debian/tmp/usr/bin/maas-generate-winrm-cert + # Install all other stuff debian/extras/maas-region-admin usr/sbin -debian/extras/maas-region-celeryd usr/sbin debian/extras/20-maas.conf etc/rsyslog.d debian/extras/maas_remote_syslog_compress 
etc/cron.d debian/extras/99-maas usr/share/maas/conf diff -Nru maas-1.5.4+bzr2294/debian/maas-region-controller-min.links maas-1.7.6+bzr3376/debian/maas-region-controller-min.links --- maas-1.5.4+bzr2294/debian/maas-region-controller-min.links 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-region-controller-min.links 2015-07-10 01:27:33.000000000 +0000 @@ -1,2 +1 @@ etc/maas/maas_local_settings.py usr/share/maas/maas_local_settings.py -etc/maas/maas_local_celeryconfig.py usr/share/maas/maas_local_celeryconfig.py diff -Nru maas-1.5.4+bzr2294/debian/maas-region-controller-min.lintian-overrides maas-1.7.6+bzr3376/debian/maas-region-controller-min.lintian-overrides --- maas-1.5.4+bzr2294/debian/maas-region-controller-min.lintian-overrides 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-region-controller-min.lintian-overrides 2015-07-10 01:27:33.000000000 +0000 @@ -1,6 +1,6 @@ maas-region-controller: binary-without-manpage usr/sbin/maas -maas-region-controller: binary-without-manpage usr/sbin/maas-region-celeryd maas-region-controller: script-not-executable etc/maas/templates/commissioning-user-data/snippets/maas_ipmi_autodetect.py maas-region-controller: script-not-executable etc/maas/templates/commissioning-user-data/snippets/maas_signal.py maas-region-controller: script-not-executable etc/maas/templates/commissioning-user-data/user_data.template maas-region-controller: script-not-executable etc/maas/templates/commissioning-user-data/snippets/maas_get.py +maas-region-controller: script-not-executable etc/maas/templates/deployment-user-data/maas_configure_interfaces.py diff -Nru maas-1.5.4+bzr2294/debian/maas-region-controller-min.logrotate maas-1.7.6+bzr3376/debian/maas-region-controller-min.logrotate --- maas-1.5.4+bzr2294/debian/maas-region-controller-min.logrotate 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-region-controller-min.logrotate 2015-07-10 01:27:33.000000000 +0000 @@ -1,4 +1,4 @@ 
-/var/log/maas/maas.log { +/var/log/maas/maas-django.log { rotate 5 weekly compress @@ -6,13 +6,3 @@ missingok #create 620 root www-data } - -/var/log/maas/txlongpoll.log { - rotate 5 - weekly - compress - missingok - postrotate - [ -r /var/run/maas-txlongpoll.pid ] && kill -s USR2 $(cat /var/run/maas-txlongpoll.pid) || true - endscript -} diff -Nru maas-1.5.4+bzr2294/debian/maas-region-controller-min.maas-region-celery.upstart maas-1.7.6+bzr3376/debian/maas-region-controller-min.maas-region-celery.upstart --- maas-1.5.4+bzr2294/debian/maas-region-controller-min.maas-region-celery.upstart 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-region-controller-min.maas-region-celery.upstart 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -# maas-celery - celery daemon for the region controller -# -# MAAS Region Controller Celery Daemon - -description "MAAS" -author "Raphael Badin " - -start on filesystem and net-device-up and rabbitmq-server-running -stop on runlevel [016] - -respawn - -env workers_user="maas_workers" -env workers_pass="" -env workers_vhost="/maas_workers" -env DJANGO_SETTINGS_MODULE="maas.settings" - -pre-start script - if [ -f /usr/sbin/rabbitmqctl ] && ! 
/usr/sbin/rabbitmqctl list_user_permissions "$workers_user" 1>/dev/null 2>&1; then - workers_pass=`grep "maas_workers" /etc/maas/maas_local_celeryconfig.py | cut -d':' -f3 | cut -d'@' -f1` - /usr/sbin/rabbitmqctl add_user "$workers_user" "$workers_pass" - /usr/sbin/rabbitmqctl add_vhost "$workers_vhost" - /usr/sbin/rabbitmqctl set_permissions -p "$workers_vhost" "$workers_user" ".*" ".*" ".*" - fi -end script - -exec /usr/sbin/maas-region-celeryd --logfile=/var/log/maas/celery-region.log --schedule=/var/lib/maas/celerybeat-region-schedule --user=maas --group=maas diff -Nru maas-1.5.4+bzr2294/debian/maas-region-controller-min.maas-txlongpoll.upstart maas-1.7.6+bzr3376/debian/maas-region-controller-min.maas-txlongpoll.upstart --- maas-1.5.4+bzr2294/debian/maas-region-controller-min.maas-txlongpoll.upstart 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-region-controller-min.maas-txlongpoll.upstart 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -# maas - txlongpoll service -# -# MAAS Provisioning Service txlongpoll - -description "MAAS txlongpoll" -author "Andres Rodriguez " - -start on filesystem and net-device-up and rabbitmq-server-running -stop on runlevel [016] - -respawn - -env longpoll_user="maas_longpoll" -env longpoll_pass="" -env longpoll_vhost="/maas_longpoll" - -pre-start script - if [ -f /usr/sbin/rabbitmqctl ] && ! 
/usr/sbin/rabbitmqctl list_user_permissions "$longpoll_user" 1>/dev/null 2>&1; then - longpoll_pass=`/bin/grep "password" /etc/maas/txlongpoll.yaml | cut -d'"' -f2` - /usr/sbin/rabbitmqctl add_user "$longpoll_user" "$longpoll_pass" - /usr/sbin/rabbitmqctl add_vhost "$longpoll_vhost" - /usr/sbin/rabbitmqctl set_permissions -p "$longpoll_vhost" "$longpoll_user" ".*" ".*" ".*" - fi -end script - -# To add options to your daemon, edit the line below: -exec /usr/bin/twistd -n --uid=maas --gid=maas --pidfile=/run/maas-txlongpoll.pid --logfile=/dev/null txlongpoll --config-file=/etc/maas/txlongpoll.yaml diff -Nru maas-1.5.4+bzr2294/debian/maas-region-controller-min.maintscript maas-1.7.6+bzr3376/debian/maas-region-controller-min.maintscript --- maas-1.5.4+bzr2294/debian/maas-region-controller-min.maintscript 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-region-controller-min.maintscript 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,6 @@ +rm_conffile /etc/maas/txlongpoll.yaml 1.7.0~beta4+bzr3124-0ubuntu1 +rm_conffile /etc/init/maas-txlongpoll.conf 1.7.0~beta4+bzr3124-0ubuntu1 +rm_conffile /etc/init/maas-region-celery.conf 1.7.0~beta4+bzr3124-0ubuntu1 +rm_conffile /etc/maas/maas_local_celerconfig.py 1.7.0~beta4+bzr3124-0ubuntu1 +rm_conffile /usr/share/maas/maas_local_celerconfig.py 1.7.0~beta4+bzr3124-0ubuntu1 +rm_conffile /etc/maas/maas_local_celeryconfig.py 1.7.0~beta4+bzr3124-0ubuntu1 diff -Nru maas-1.5.4+bzr2294/debian/maas-region-controller-min.postinst maas-1.7.6+bzr3376/debian/maas-region-controller-min.postinst --- maas-1.5.4+bzr2294/debian/maas-region-controller-min.postinst 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-region-controller-min.postinst 2015-07-10 01:27:33.000000000 +0000 @@ -7,10 +7,6 @@ RELEASE=`lsb_release -rs` || RELEASE="" -restart_squid_deb_proxy() { - invoke-rc.d squid-deb-proxy restart || true -} - configure_region_http() { case $RELEASE in 12.04|12.10|13.04) @@ -53,27 +49,6 @@ echo $ipaddr } 
-configure_maas_squid_deb_proxy() { - local ipaddr="$1" - - if [ -e /usr/share/maas/conf/99-maas -a \ - ! -L /etc/squid-deb-proxy/mirror-dstdomain.acl.d/99-maas ]; then - ln -sf /usr/share/maas/conf/99-maas \ - /etc/squid-deb-proxy/mirror-dstdomain.acl.d/99-maas - fi - - sed -i "s/\(^[a-zA-Z0-9\.\-].*\) # maasurl$/$ipaddr # maasurl/" \ - /usr/share/maas/conf/99-maas -} - -# Please keep this stanza until 14.10 -if [ "$1" = "configure" ]; then - if dpkg --compare-versions "$2" le-nl 1.5+bzr1909-0ubuntu1 ; then - chown root:maas /etc/maas/txlongpoll.yaml - chmod 0640 /etc/maas/txlongpoll.yaml - fi -fi - if [ "$1" = "configure" ] && [ -z "$2" ]; then ######################################################### ################ Folder Permissions #################### @@ -83,14 +58,8 @@ # Config will contain credentials, so should be readable # by the application but nobody else. - chown root:maas \ - /etc/maas/maas_local_celeryconfig.py \ - /etc/maas/maas_local_settings.py \ - /etc/maas/txlongpoll.yaml - chmod 0640 \ - /etc/maas/maas_local_celeryconfig.py \ - /etc/maas/maas_local_settings.py \ - /etc/maas/txlongpoll.yaml + chown root:maas /etc/maas/maas_local_settings.py + chmod 0640 /etc/maas/maas_local_settings.py ######################################################### ################ Configure Apache2 #################### @@ -111,7 +80,6 @@ # Set the IP address of the interface with default route if [ -n "$ipaddr" ]; then configure_maas_default_url "$ipaddr" - configure_maas_squid_deb_proxy "$ipaddr" db_set maas/default-maas-url "$ipaddr" fi @@ -120,23 +88,28 @@ ######################################################### # Give appropriate permissions - if [ ! -f /var/log/maas/maas.log ]; then - touch /var/log/maas/maas.log + if [ ! -f /var/log/maas/maas-django.log ]; then + touch /var/log/maas/maas-django.log fi chown -R maas:maas /var/log/maas chmod -R 775 /var/log/maas/oops + # Main syslog file. + if [ ! 
-f /var/log/maas/maas.log ]; then + touch /var/log/maas/maas.log + fi + chown syslog:syslog /var/log/maas/maas.log + # Create log directory base mkdir -p /var/log/maas/rsyslog chown -R syslog:syslog /var/log/maas/rsyslog # Make sure rsyslog reads our config invoke-rc.d rsyslog restart - ######################################################### - ################### Squid-deb-proxy #################### - ######################################################### - # Make sure squid-deb-proxy reads our config (99-maas) - restart_squid_deb_proxy + # If proxy log dir exists, set correct permissions + if [ -d /var/log/maas/proxy ]; then + chown -R proxy:proxy /var/log/maas/proxy + fi elif [ -n "$DEBCONF_RECONFIGURE" ]; then # Set the IP address of the interface with default route @@ -144,7 +117,6 @@ ipaddr="$RET" if [ -n "$ipaddr" ]; then configure_maas_default_url "$ipaddr" - configure_maas_squid_deb_proxy "$ipaddr" fi elif [ "$1" = "configure" ] && dpkg --compare-versions "$2" gt 0.1+bzr266+dfsg-0ubuntu1; then @@ -158,13 +130,10 @@ db_get maas/default-maas-url ipaddr="$RET" configure_maas_default_url "$ipaddr" - configure_maas_squid_deb_proxy "$ipaddr" fi invoke-rc.d apache2 restart || true -restart_squid_deb_proxy - db_stop #DEBHELPER# diff -Nru maas-1.5.4+bzr2294/debian/maas-region-controller-min.postrm maas-1.7.6+bzr3376/debian/maas-region-controller-min.postrm --- maas-1.5.4+bzr2294/debian/maas-region-controller-min.postrm 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-region-controller-min.postrm 2015-07-10 01:27:33.000000000 +0000 @@ -32,6 +32,9 @@ if [ -L /etc/squid-deb-proxy/mirror-dstdomain.acl.d/99-maas ]; then rm -rf /etc/squid-deb-proxy/mirror-dstdomain.acl.d/99-maas fi + + # Delete any remaining / leftover file + rm -rf /usr/share/maas/maas/ esac #DEBHELPER# diff -Nru maas-1.5.4+bzr2294/debian/maas-region-controller-min.preinst maas-1.7.6+bzr3376/debian/maas-region-controller-min.preinst --- 
maas-1.5.4+bzr2294/debian/maas-region-controller-min.preinst 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-region-controller-min.preinst 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,27 @@ +#!/bin/sh + +set -e + +. /usr/share/debconf/confmodule + +if [ "$1" = "upgrade" ] && dpkg --compare-versions "$2" lt 1.7.0~beta4+bzr3127-0ubuntu1; then + + if [ -f /etc/init/maas-txlongpoll.conf ]; then + invoke-rc.d maas-txlongpoll stop + fi + + if [ -f /var/log/maas/txlongpoll.log ]; then + rm -rf /var/log/maas/txlongpoll.log + fi + + if [ -f /etc/init/maas-region-celery.conf ]; then + invoke-rc.d maas-region-celery stop + fi + + if [ -f /var/log/maas/celery-region.log ]; then + rm -rf /var/log/maas/celery-region.log + fi + +fi + +#DEBHELPER# diff -Nru maas-1.5.4+bzr2294/debian/maas-region-controller.postinst maas-1.7.6+bzr3376/debian/maas-region-controller.postinst --- maas-1.5.4+bzr2294/debian/maas-region-controller.postinst 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-region-controller.postinst 2015-07-10 17:50:44.000000000 +0000 @@ -17,18 +17,10 @@ maas-region-admin migrate metadataserver --noinput } -restart_rabbitmq(){ - invoke-rc.d rabbitmq-server restart || true -} - restart_postgresql(){ invoke-rc.d --force postgresql restart || true } -restart_squid_deb_proxy() { - invoke-rc.d squid-deb-proxy restart || true -} - configure_region_http() { case $RELEASE in 12.04|12.10|13.04) @@ -52,57 +44,6 @@ a2enmod wsgi } -configure_maas_txlongpoll_rabbitmq_user() { - local longpoll_user="maas_longpoll" - local longpoll_pass= - local longpoll_vhost="/maas_longpoll" - longpoll_pass="$(pwgen -s 20)" - if [ -x /usr/sbin/rabbitmqctl ]; then - if ! 
rabbitmqctl list_users | grep -qs "$longpoll_user"; then - rabbitmqctl add_user "$longpoll_user" "$longpoll_pass" || true - rabbitmqctl add_vhost "$longpoll_vhost" || true - rabbitmqctl set_permissions -p "$longpoll_vhost" "$longpoll_user" ".*" ".*" ".*" || true - else - rabbitmqctl change_password "$longpoll_user" "$longpoll_pass" || true - fi - fi - - if grep -qs "^\ \{1,\}password: \"[a-zA-Z0-9]\{0,\}\"$" /etc/maas/txlongpoll.yaml; then - sed -i "s/^\ \{1,\}password: \"[a-zA-Z0-9]\{0,\}\"$/ password: \""$longpoll_pass"\"/" \ - /etc/maas/txlongpoll.yaml - fi - if grep -qs "^RABBITMQ_PASSWORD\ \= '[a-zA-Z0-9]\{0,\}'$" /etc/maas/maas_local_settings.py; then - sed -i "s/^RABBITMQ_PASSWORD\ \= '[a-zA-Z0-9]\{0,\}'$/RABBITMQ_PASSWORD = '"$longpoll_pass"'/" \ - /etc/maas/maas_local_settings.py - fi -} - -configure_maas_workers_rabbitmq_user() { - local workers_user="maas_workers" - local workers_pass="$(pwgen -s 20)" - local workers_vhost="/maas_workers" - local amqp_host="$1" - if [ -z "$amqp_host" ]; then - amqp_host="localhost" - fi - local amqp_port="5672" - if [ -x /usr/sbin/rabbitmqctl ]; then - if ! 
rabbitmqctl list_users | grep -qs "$workers_user"; then - rabbitmqctl add_user "$workers_user" "$workers_pass" || true - rabbitmqctl add_vhost "$workers_vhost" || true - rabbitmqctl set_permissions -p "$workers_vhost" "$workers_user" ".*" ".*" ".*" || true - else - rabbitmqctl change_password "$workers_user" "$workers_pass" || true - fi - fi - - if grep -qs "^BROKER_URL\ \= '.*'$" /etc/maas/maas_local_celeryconfig.py; then - local broker_url="amqp://$workers_user:$workers_pass@$amqp_host:$amqp_port/$workers_vhost" - sed -i "s|^BROKER_URL\ \= '.*'$|BROKER_URL = '"$broker_url"'|" \ - /etc/maas/maas_local_celeryconfig.py - fi -} - configure_maas_database() { local dbc_dbpass="$1" if grep -qs "^\ \{1,\} 'PASSWORD': '[a-zA-Z0-9]\{0,\}',$" /etc/maas/maas_local_settings.py; then @@ -131,17 +72,44 @@ echo $ipaddr } -configure_maas_squid_deb_proxy() { - local ipaddr="$1" +configure_logging() { + # Give appropriate permissions + if [ ! -f /var/log/maas/maas-django.log ]; then + touch /var/log/maas/maas-django.log + fi + chown -R maas:maas /var/log/maas + chmod -R 775 /var/log/maas/oops - if [ -e /usr/share/maas/conf/99-maas -a \ - ! -L /etc/squid-deb-proxy/mirror-dstdomain.acl.d/99-maas ]; then - ln -sf /usr/share/maas/conf/99-maas \ - /etc/squid-deb-proxy/mirror-dstdomain.acl.d/99-maas + # Main syslog file. + if [ ! 
-f /var/log/maas/maas.log ]; then + touch /var/log/maas/maas.log fi + chown syslog:syslog /var/log/maas/maas.log - sed -i "s/\(^[a-zA-Z0-9\.\-].*\) # maasurl$/$ipaddr # maasurl/" \ - /usr/share/maas/conf/99-maas + # Create log directory base + mkdir -p /var/log/maas/rsyslog + chown -R syslog:syslog /var/log/maas/rsyslog + # Make sure rsyslog reads our config + invoke-rc.d rsyslog restart + + # If proxy log dir exists, set correct permissions + if [ -d /var/log/maas/proxy ]; then + chown -R proxy:proxy /var/log/maas/proxy + fi +} + +configure_third_party_log_symlinks() { + ln -sf /var/log/apache2 /var/log/maas/ +} + +configure_migrate_maas_dns() { + # This only runs on upgrade. We only run this if the + # there are forwarders to migrate or no + # named.conf.options.inside.maas are present. + maas-region-admin edit_named_options \ + --migrate-conflicting-options --config-path \ + /etc/bind/named.conf.options || true + invoke-rc.d bind9 restart || true } if [ "$1" = "configure" ] && [ -z "$2" ]; then @@ -153,12 +121,8 @@ # Config will contain credentials, so should be readable # by the application but nobody else. - chown root:maas \ - /etc/maas/maas_local_celeryconfig.py \ - /etc/maas/maas_local_settings.py - chmod 0640 \ - /etc/maas/maas_local_celeryconfig.py \ - /etc/maas/maas_local_settings.py + chown root:maas /etc/maas/maas_local_settings.py + chmod 0640 /etc/maas/maas_local_settings.py ######################################################### ################ Configure Apache2 #################### @@ -179,7 +143,6 @@ # Set the IP address of the interface with default route if [ -n "$ipaddr" ]; then configure_maas_default_url "$ipaddr" - configure_maas_squid_deb_proxy "$ipaddr" db_subst maas/installation-note MAAS_URL "$ipaddr" db_set maas/default-maas-url "$ipaddr" fi @@ -188,39 +151,8 @@ ################ Configure Logging #################### ######################################################### - # Give appropriate permissions - if [ ! 
-f /var/log/maas/maas.log ]; then - touch /var/log/maas/maas.log - fi - chown -R maas:maas /var/log/maas - chmod -R 775 /var/log/maas/oops - - # Create log directory base - mkdir -p /var/log/maas/rsyslog - chown -R syslog:syslog /var/log/maas/rsyslog - # Make sure rsyslog reads our config - invoke-rc.d rsyslog restart - - ######################################################### - ################### Squid-deb-proxy #################### - ######################################################### - # Make sure squid-deb-proxy reads our config (99-maas) - invoke-rc.d squid-deb-proxy restart - - ######################################################### - ########## Configure longpoll rabbitmq config ########### - ######################################################### - - # Handle longpoll/rabbitmq publishing - restart_rabbitmq - configure_maas_txlongpoll_rabbitmq_user - - ######################################################### - ########## Configure worker rabbitmq config ########### - ######################################################### - - # Handle celery/rabbitmq publishing - configure_maas_workers_rabbitmq_user "$ipaddr" + configure_logging + configure_third_party_log_symlinks ######################################################### ################ Configure Database ################### @@ -237,6 +169,7 @@ db_get maas-region-controller/dbconfig-install if [ "$RET" = "true" ]; then maas_sync_migrate_db + configure_migrate_maas_dns fi # Display installation note @@ -249,12 +182,13 @@ ipaddr="$RET" if [ -n "$ipaddr" ]; then configure_maas_default_url "$ipaddr" - configure_maas_squid_deb_proxy "$ipaddr" - configure_maas_workers_rabbitmq_user "$ipaddr" fi - configure_maas_txlongpoll_rabbitmq_user elif [ "$1" = "configure" ] && dpkg --compare-versions "$2" gt 0.1+bzr266+dfsg-0ubuntu1; then + # Logging changed at r2611, ensure it is set up. + configure_logging + configure_third_party_log_symlinks + # If upgrading to any later package version, then upgrade db. 
invoke-rc.d apache2 stop || true @@ -268,12 +202,6 @@ db_get maas/default-maas-url ipaddr="$RET" configure_maas_default_url "$ipaddr" - configure_maas_squid_deb_proxy "$ipaddr" - # make sure rabbitmq is running - restart_rabbitmq - configure_maas_txlongpoll_rabbitmq_user - # Handle celery/rabbitmq publishing - configure_maas_workers_rabbitmq_user "$ipaddr" # handle database upgrade if [ -f /etc/dbconfig-common/maas-region-controller.conf ]; then # source dbconfig-common db config for maas-region-controller @@ -287,14 +215,14 @@ maas_sync_migrate_db + configure_migrate_maas_dns fi invoke-rc.d apache2 restart || true -restart_squid_deb_proxy - -invoke-rc.d maas-txlongpoll restart || true -invoke-rc.d maas-region-celery restart || true +if [ -f /etc/init/maas-clusterd.conf ]; then + invoke-rc.d maas-clusterd restart || true +fi db_stop diff -Nru maas-1.5.4+bzr2294/debian/maas-region-controller.postrm maas-1.7.6+bzr3376/debian/maas-region-controller.postrm --- maas-1.5.4+bzr2294/debian/maas-region-controller.postrm 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/maas-region-controller.postrm 2015-07-10 01:27:33.000000000 +0000 @@ -40,18 +40,6 @@ rm -rf /etc/squid-deb-proxy/mirror-dstdomain.acl.d/99-maas fi - # Remove rabbitmq/longpoll/celery - longpoll_user="maas_longpoll" - longpoll_vhost="/maas_longpoll" - workers_user="maas_workers" - workers_vhost="/maas_workers" - if [ -x /usr/sbin/rabbitmqctl ]; then - rabbitmqctl delete_vhost "$longpoll_vhost" || true - rabbitmqctl delete_user "$longpoll_user" || true - - rabbitmqctl delete_vhost "$workers_vhost" || true - rabbitmqctl delete_user "$workers_user" || true - fi esac #DEBHELPER# diff -Nru maas-1.5.4+bzr2294/debian/patches/01-fix-database-settings.patch maas-1.7.6+bzr3376/debian/patches/01-fix-database-settings.patch --- maas-1.5.4+bzr2294/debian/patches/01-fix-database-settings.patch 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/patches/01-fix-database-settings.patch 2015-07-10 
01:27:33.000000000 +0000 @@ -1,5 +1,5 @@ ---- maas-1.5+bzr1908.orig.orig/contrib/maas_local_settings.py 2014-02-07 11:22:59.330556956 -0500 -+++ maas-1.5+bzr1908.orig/contrib/maas_local_settings.py 2014-02-07 11:22:59.326556956 -0500 +--- a/contrib/maas_local_settings.py ++++ b/contrib/maas_local_settings.py @@ -7,7 +7,7 @@ DEFAULT_MAAS_URL = "http://maas.internal.example.com/" @@ -20,5 +20,5 @@ + 'USER': 'maas', + 'PASSWORD': 'maas', 'HOST': 'localhost', - } - } + 'OPTIONS': { + 'isolation_level': ISOLATION_LEVEL_READ_COMMITTED, diff -Nru maas-1.5.4+bzr2294/debian/patches/02-pserv-config.patch maas-1.7.6+bzr3376/debian/patches/02-pserv-config.patch --- maas-1.5.4+bzr2294/debian/patches/02-pserv-config.patch 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/patches/02-pserv-config.patch 2015-07-10 01:27:33.000000000 +0000 @@ -3,8 +3,10 @@ include port, logfile, oops directory, cobbler url and username Author: Andres Rodriguez ---- a/etc/maas/pserv.yaml -+++ b/etc/maas/pserv.yaml +Index: maas-1.7.3+bzr3359.orig/etc/maas/pserv.yaml +=================================================================== +--- maas-1.7.3+bzr3359.orig.orig/etc/maas/pserv.yaml 2015-04-08 10:31:56.205881287 -0400 ++++ maas-1.7.3+bzr3359.orig/etc/maas/pserv.yaml 2015-04-08 10:32:06.505881551 -0400 @@ -6,7 +6,7 @@ ## running server. # @@ -12,18 +14,9 @@ -logfile: "/dev/null" +logfile: "/var/log/maas/pserv.log" - ## OOPS configuration (optional). + ## TFTP configuration. # -@@ -15,7 +15,7 @@ - # or directories other than what the oops machinery creates there. - # - # directory: -- directory: "logs/oops" -+ directory: "/var/log/maas/oops" - # reporter: - reporter: "maas-pserv" - -@@ -38,8 +38,6 @@ +@@ -18,8 +18,7 @@ # resource_root: /var/lib/maas/boot-resources/current/ # port: 69 @@ -31,4 +24,5 @@ ## The URL to be contacted to generate PXE configurations. 
# generator: http://localhost/MAAS/api/1.0/pxeconfig/ - generator: http://localhost:5243/api/1.0/pxeconfig/ ++ generator: http://localhost/MAAS/api/1.0/pxeconfig/ diff -Nru maas-1.5.4+bzr2294/debian/patches/03-fix-wsgi-djang16.patch maas-1.7.6+bzr3376/debian/patches/03-fix-wsgi-djang16.patch --- maas-1.5.4+bzr2294/debian/patches/03-fix-wsgi-djang16.patch 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/patches/03-fix-wsgi-djang16.patch 2015-07-10 01:27:33.000000000 +0000 @@ -0,0 +1,18 @@ +Index: maas-1.7.3+bzr3363/contrib/wsgi.py +=================================================================== +--- maas-1.7.3+bzr3363.orig/contrib/wsgi.py 2015-04-09 12:09:21.000000000 -0400 ++++ maas-1.7.3+bzr3363/contrib/wsgi.py 2015-04-22 12:05:20.793014950 -0400 +@@ -24,6 +24,13 @@ + sys.path.append(current_path) + + os.environ['DJANGO_SETTINGS_MODULE'] = 'maas.settings' ++ ++# Use Django 1.6 if the python-django16 package is installed: this is ++# to get MAAS to work on vivid: vivid ships with Django 1.7 by default ++# and MAAS isn't yet compatible with Django 1.7. ++if os.path.exists('/usr/lib/django16'): ++ sys.path.insert(1, '/usr/lib/django16') ++ + import django.core.handlers.wsgi + application = django.core.handlers.wsgi.WSGIHandler() + diff -Nru maas-1.5.4+bzr2294/debian/patches/03-txlongpoll-config.patch maas-1.7.6+bzr3376/debian/patches/03-txlongpoll-config.patch --- maas-1.5.4+bzr2294/debian/patches/03-txlongpoll-config.patch 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/patches/03-txlongpoll-config.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -Description: Use default settings for MAAS txlongpoll - Use default settings for MAAS txlongpoll. These default settings - include oops directory, Message broker configuration, logfile. 
-Author: Andres Rodriguez - ---- maas-1.5+bzr1908.orig.orig/etc/txlongpoll.yaml 2014-02-07 11:21:12.578557974 -0500 -+++ maas-1.5+bzr1908.orig/etc/txlongpoll.yaml 2014-02-07 11:21:12.578557974 -0500 -@@ -17,7 +17,7 @@ - # or directories other than what the oops machinery creates there. - # - # directory: "" -- directory: "logs/oops" -+ directory: "/var/log/maas/oops" - ## The reporter used when generating OOPS reports. - # reporter: "LONGPOLL" - reporter: "maas-txlongpoll" -@@ -25,14 +25,14 @@ - ## Message broker configuration. - # - broker: -- # host: "localhost" -- # port: 5672 -- # username: "guest" -- # password: "guest" -- # vhost: "/" -+ host: "localhost" -+ port: 5672 -+ username: "maas_longpoll" -+ password: "maaslongpoll" -+ vhost: "/maas_longpoll" - - ## Where to log. This log can be rotated by sending SIGUSR1 to the - ## running server. - # - # logfile: "txlongpoll.log" --logfile: "/dev/null" -+logfile: "/var/log/maas/txlongpoll.log" diff -Nru maas-1.5.4+bzr2294/debian/patches/04-enable-armhf-keystone.patch maas-1.7.6+bzr3376/debian/patches/04-enable-armhf-keystone.patch --- maas-1.5.4+bzr2294/debian/patches/04-enable-armhf-keystone.patch 2014-09-19 20:44:30.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/patches/04-enable-armhf-keystone.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -Description: Add hardware enablement for armhf/keystone -Bug: https://bugs.launchpad.net/ubuntu/+source/maas/+bug/1350103 -Upstream: revno: 2634 ---- a/src/provisioningserver/driver/__init__.py -+++ b/src/provisioningserver/driver/__init__.py -@@ -145,6 +145,9 @@ builtin_architectures = [ - Architecture( - name="armhf/generic", description="armhf/generic", - pxealiases=["arm"], kernel_options=["console=ttyAMA0"]), -+ Architecture( -+ name="armhf/keystone", description="armhf/keystone", -+ pxealiases=["arm"]), - # PPC64EL needs a rootdelay for PowerNV. The disk controller - # in the hardware, takes a little bit longer to come up then - # the initrd wants to wait. 
Set this to 60 seconds, just to diff -Nru maas-1.5.4+bzr2294/debian/patches/home-directory.patch maas-1.7.6+bzr3376/debian/patches/home-directory.patch --- maas-1.5.4+bzr2294/debian/patches/home-directory.patch 2014-12-04 18:59:51.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/patches/home-directory.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -Description: Fix compatibility with mod-wsgi security update -Bug-Ubuntu: https://bugs.launchpad.net/maas/+bug/1399016 - -Index: maas-1.7.0~beta8+bzr3272/contrib/maas-http.conf -=================================================================== ---- maas-1.7.0~beta8+bzr3272.orig/contrib/maas-http.conf 2014-10-22 12:55:39.000000000 -0400 -+++ maas-1.7.0~beta8+bzr3272/contrib/maas-http.conf 2014-12-04 13:56:05.790551266 -0500 -@@ -1,4 +1,4 @@ --WSGIDaemonProcess maas user=maas group=maas processes=2 threads=1 display-name=%{GROUP} -+WSGIDaemonProcess maas user=maas group=maas home=/var/lib/maas/ processes=2 threads=1 display-name=%{GROUP} - - # Without this, defining a tag as a malformed xpath expression will hang - # the region controller. 
diff -Nru maas-1.5.4+bzr2294/debian/patches/series maas-1.7.6+bzr3376/debian/patches/series --- maas-1.5.4+bzr2294/debian/patches/series 2014-12-04 18:59:51.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/patches/series 2015-07-10 01:27:33.000000000 +0000 @@ -1,5 +1,3 @@ 01-fix-database-settings.patch 02-pserv-config.patch -03-txlongpoll-config.patch -04-enable-armhf-keystone.patch -home-directory.patch +03-fix-wsgi-djang16.patch diff -Nru maas-1.5.4+bzr2294/debian/rules maas-1.7.6+bzr3376/debian/rules --- maas-1.5.4+bzr2294/debian/rules 2014-06-20 14:44:21.000000000 +0000 +++ maas-1.7.6+bzr3376/debian/rules 2015-07-10 01:27:33.000000000 +0000 @@ -6,14 +6,25 @@ py_enums := $(wildcard src/*/enum.py) %: - dh $@ --with python2,apport --buildsystem=python_distutils + dh $@ --with python2,apport,systemd --buildsystem=python_distutils override_dh_installinit: - dh_installinit --name maas-txlongpoll --no-start - dh_installinit --name maas-region-celery --no-start - dh_installinit --name maas-pserv - dh_installinit --name maas-cluster-celery - dh_installinit --name maas-dhcp-server + # maas-clusterd + dh_systemd_enable --name maas-clusterd + dh_installinit --name maas-clusterd + dh_systemd_start --name maas-clusterd + # maas-dhcpd + dh_systemd_enable --name maas-dhcpd + dh_installinit --name maas-dhcpd + dh_systemd_start --name maas-dhcpd + # maas-dhcpd6 + dh_systemd_enable --name maas-dhcpd6 + dh_installinit --name maas-dhcpd6 + dh_systemd_start --name maas-dhcpd6 + # maas-proxy + dh_systemd_enable --name maas-proxy + dh_installinit --name maas-proxy + dh_systemd_start --name maas-proxy override_dh_auto_build: dh_auto_build @@ -53,4 +64,4 @@ | sed -rne 's,^Version: ([^-]+).*,\1,p') get-orig-source: bzr export -r $(REV) --root=maas-$(VER).orig \ - maas_$(VER).orig.tar.gz lp:maas/1.5 + maas_$(VER).orig.tar.gz lp:maas/1.7 diff -Nru maas-1.5.4+bzr2294/docs/about.rst maas-1.7.6+bzr3376/docs/about.rst --- maas-1.5.4+bzr2294/docs/about.rst 2014-09-03 14:18:31.000000000 +0000 +++ 
maas-1.7.6+bzr3376/docs/about.rst 2015-07-10 01:27:14.000000000 +0000 @@ -15,15 +15,18 @@ ---------- In a cunning move, the current documentation always lives, and is -built from, the main MAAS source code. That means that whatever MAAS -package you have installed, or even if you are really living life on -the edge and have checked out a development version from Launchpad, -this documentation should be the latest and most appropriate version -for the software you are running. However, it is also possible that -there have been further sections or more helpful, or clearer bits -added since the package you are using was made. For this reason you -can always find the latest documentation online here: -http://maas.ubuntu.com +built from, the main MAAS source code (in the top-level ``docs/`` +directory). That means that whatever MAAS package you have installed, +or even if you are really living life on the edge and have checked out +a development version from Launchpad, this documentation should be the +latest and most appropriate version for the software you are running. +However, it is also possible that there have been additional sections, +or more helpful and clearer bits added since the package you are using +was made. For this reason you can always find the latest documentation +online here: `http://maas.ubuntu.com`_. + +.. _http://maas.ubuntu.com: + http://maas.ubuntu.com Contributing @@ -44,4 +47,4 @@ Download the source to MAAS by following the instructions in :doc:`the hacking guide `, make your changes, and propose a merge against lp:maas on Launchpad. The documentation source lives in the top-level -``doc/`` directory. +``docs/`` directory. diff -Nru maas-1.5.4+bzr2294/docs/api_authentication.rst maas-1.7.6+bzr3376/docs/api_authentication.rst --- maas-1.5.4+bzr2294/docs/api_authentication.rst 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/api_authentication.rst 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,78 @@ +.. -*- mode: rst -*- + +.. 
_api_authentication: + +API authentication +================== + +MAAS's API uses OAuth_ as its authentication mechanism. There isn't a third +party involved (as in 3-legged OAuth) and so the process used is what's +commonly referred to as 0-legged OAuth: the consumer accesses protected +resources by submitting OAuth signed requests. + +.. _OAuth: http://en.wikipedia.org/wiki/OAuth + +Note that some API endpoints support unauthenticated requests (i.e. +anonymous access). See the :doc:`API documentation ` for details. + + +Examples +======== + +Here are two examples on how to perform an authenticated GET request to +retrieve the list of nodes. The , , tokens +are the three elements that compose the API key (API key = +'::'). + +Python +------ + +.. code:: python + + import oauth.oauth as oauth + import httplib2 + import uuid + + def perform_API_request(site, uri, method, key, secret, consumer_key): + resource_tok_string = "oauth_token_secret=%s&oauth_token=%s" % ( + secret, key) + resource_token = oauth.OAuthToken.from_string(resource_tok_string) + consumer_token = oauth.OAuthConsumer(consumer_key, "") + + oauth_request = oauth.OAuthRequest.from_consumer_and_token( + consumer_token, token=resource_token, http_url=site, + parameters={'oauth_nonce': uuid.uuid4().get_hex()}) + oauth_request.sign_request( + oauth.OAuthSignatureMethod_PLAINTEXT(), consumer_token, + resource_token) + headers = oauth_request.to_header() + url = "%s%s" % (site, uri) + http = httplib2.Http() + return http.request(url, method, body=None, headers=headers) + + # API key = '::' + response = perform_API_request( + 'http://server/MAAS/api/1.0', '/nodes/?op=list', 'GET', '', '', + '') + +Ruby +---- + +.. 
code:: ruby + + require 'oauth' + require 'oauth/signature/plaintext' + + def perform_API_request(site, uri, key, secret, consumer_key) + consumer = OAuth::Consumer.new( + consumer_key, "", + { :site => "http://localhost/MAAS/api/1.0", + :scheme => :header, :signature_method => "PLAINTEXT"}) + access_token = OAuth::AccessToken.new(consumer, key, secret) + return access_token.request(:get, "/nodes/?op=list") + end + + # API key = "::" + response = perform_API_request( + "http://server/MAAS/api/1.0", "/nodes/?op=list", "", "", + "consumer_key>") diff -Nru maas-1.5.4+bzr2294/docs/bootsources.rst maas-1.7.6+bzr3376/docs/bootsources.rst --- maas-1.5.4+bzr2294/docs/bootsources.rst 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/bootsources.rst 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,99 @@ +.. -*- mode: rst -*- + +.. _bootsources: + +Boot images import configuration +================================ + +The configuration for where a region downloads its images is defined by +a set of "sources". Each "source" defines a Simplestreams repository +location (``url``) from which images can be downloaded and a +``keyring_filename`` (or ``keyring_data``) for validating index and image +signatures from that location. For each source, you can define a series of +filters (``selections``) specifying which images should be downloaded from +that source. + +The following example use the MAAS CLI to list the boot sources and the boot +source selections. 
Assuming the CLI ``PROFILE`` is the name of the profile +under which you're logged in to the server:: + + $ maas $PROFILE boot-sources read + [ + { + "url": "http://maas.ubuntu.com/images/ephemeral-v2/releases/", + "keyring_data": "", + "resource_uri": "", + "keyring_filename": "/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg", + "id": 1 + } + ] + + $ maas $PROFILE boot-source-selections read 1 + [ + { + "labels": [ + "release" + ], + "arches": [ + "amd64" + ], + "subarches": [ + "*" + ], + "release": "trusty", + "id": 1, + "resource_uri": "" + } + ] + + +Restricting the images being downloaded +--------------------------------------- + +Let's say you want to add a previous LTS release to images being downloaded. +Starting from the configuration described above, you would need to: + +- Add the "Precise" selection (the selection '1' of the source '1'):: + + $ maas $PROFILE boot-source-selection create 1 os="ubuntu" release="precise" arches="amd64" subarches="*" labels="*" + +Downloading the images from a different source +---------------------------------------------- + +Let's say you want to import the images from a different location. 
You would +need to to change the source's url and keyring:: + + $ maas $PROFILE boot-source update 1 url="http://custom.url" keyring_filename="" keyring_data@=./custom_keyring_file + { + "url": "http://custom.url/", + "keyring_data": "", + "resource_uri": "", + "keyring_filename": "", + "id": 1 + } + +Adding a source +--------------- + +You can also add a new source:: + + $ maas $PROFILE boot-sources create url=http://my.url keyring_filename="" keyring_data@=./ custom_keyring_file + { + "url": "http://my.url/", + "keyring_data": "ZW1wdHkK", + "keyring_filename": "", + "id": 2, + "resource_uri": "" + } + +Inside that newly created source ('2') you can add selections:: + + $ maas $PROFILE boot-source-selections create os="ubuntu" release="trusty" arches="amd64" subarches="*" labels='*' + { + "labels": ["*"], + "arches": ["amd64"], + "subarches": ["*"], + "release": "trusty", + "id": 3, + "resource_uri": "" + } diff -Nru maas-1.5.4+bzr2294/docs/capabilities.rst maas-1.7.6+bzr3376/docs/capabilities.rst --- maas-1.5.4+bzr2294/docs/capabilities.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/capabilities.rst 2015-07-10 01:27:14.000000000 +0000 @@ -29,3 +29,16 @@ Passive modelling of the network environment that cluster controllers nodes are in, including network interfaces, subnets, VLAN tags, and connectivity between them. See :ref:`networks` for more information. + +.. _cap-static-ipaddresses: + +``static-ipaddresses`` + Static IP address allocation to nodes, including user-reserved IPs and admin- + allocated 'sticky' IPs. Available since version 1.6. See :ref:`static-ips` + for more information. + +.. _cap-ipv6-deployment-ubuntu: + +``ipv6-deployment-ubuntu`` + Deploy Ubuntu nodes with IPv6 networking enabled. See :ref:`ipv6` for more + about this feature. 
diff -Nru maas-1.5.4+bzr2294/docs/changelog.rst maas-1.7.6+bzr3376/docs/changelog.rst --- maas-1.5.4+bzr2294/docs/changelog.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/changelog.rst 2015-07-10 01:27:14.000000000 +0000 @@ -2,20 +2,768 @@ Changelog ========= -1.5.4 +1.7.6 +===== + +Bug Fix Update +-------------- + +#1470585 Accept list of forwarders for upstream_dns rather than just one. + +#1413388 Fix upgrade issue where it would remove custom DNS config, + potentially breaking DNS. + +1.7.5 +===== + +Bug Fix Update +-------------- + +#1456969 MAAS cli/API: missing option set use-fast-installer / + use-debian-installer + +1.7.4 +===== + +Bug Fix Update +-------------- + +#1456892 500 error: UnboundLocalError: local variable 'key_required' + referenced before assignment +#1387859 When MAAS has too many leases, and lease parsing fails, MAAS fails + to auto-map NIC with network +#1329267 Alert a command-line user of `maas` when their local API + description is out-of-date. + +1.7.3 +===== + +Bug Fix Update +-------------- + +#1441933 Internal Server Error when saving a cluster without Router IP +#1441133 MAAS version not exposed over the API +#1437094 Sorting by mac address on webui causes internal server error +#1439359 Automatically set correct boot resources selection and start import + after upgrade from MAAS 1.5; Ensures MAAS is usable after upgrade. +#1439366 Backwards compatibility with MAAS 1.5 preseeds and custom preseeds. + Ensures that users dont have to manually change preseeds names. + +1.7.2 +===== + +Bug Fix Update +-------------- + +For full details see https://launchpad.net/maas/+milestone/1.7.2 + +#1331214 Support AMT Version > 8 +#1397567 Fix call to amttool when restarting a node to not fail disk erasing. +#1415538 Do not generate the 'option routers' stanza if router IP is None. +#1403909 Do not deallocate StaticIPAddress before node has powered off. +#1405998 Remove all OOPS reporting. 
+#1423931 Update the nodes host maps when a sticky ip address is claimed over the API. +#1433697 Look for bootloaders in /usr/lib/EXTLINUX + + +1.7.1 +===== + +Minor feature improvements +-------------------------- + +New CentOS Release support. + Further to the work done in the 1.7.0 MAAS Release, MAAS now supports + uploading various versions of CentOS. Previously MAAS would only + officially support 6.5. + +Power Monitoring for Seamicro 15000, Cisco UCS and HP Moonshot Chassis + Further the work done in the 1.7.0 MAAS release, it now supports power + query and monitoring for the Seamicro 15000 Chassis, the Cisco UCS + Chassis Manager and the HP Moonshot Chassis Manager. + +Node Listing Page and Node Event Log live refresh + The Node Listing page and the Node Event Log now have live refresh + every 10 seconds. This allows MAAS to display the latest node status + and events without forcing a browser refresh. + +IP Address Reservation + The static IP address reservation API now has an optional "mac" + parameter. Specifying a MAC address here will link the new static IP + to that MAC address. A DHCP host map will be created for the MAC + address. No other IPs may be reserved for that MAC address until the + current one is released. + +Bug fix update +-------------- + +For full details see https://launchpad.net/maas/+milestone/1.7.1 + +#1330765 If start_nodes() fails, it doesn't clean up after itself. +#1373261 pserv.yaml rewrite breaks when previous generator URL uses IPv6 address +#1386432 After update to the latest curtin that changes the log to install.log MAAS show's two installation logs +#1386488 If rndc fails, you get an Internal Server Error page +#1386502 No "failed" transition from "new" +#1386914 twisted Unhandled Error when region can't reach upstream boot resource +#1391139 Tagged VLAN on aliased NIC breaks migration 0099 +#1391161 Failure: twisted.internet.error.ConnectionDone: Connection was closed cleanly. 
+#1391411 metadata API signal() is releasing host maps at the end of installation +#1391897 Network names with dots cause internal server error when on node pages +#1394382 maas does not know about VM "paused" state +#1396308 Removing managed interface causes maas to delete nodes +#1397356 Disk Wiping fails if installation is not Ubuntu +#1398405 MAAS UI reports storage size in Gibibytes (base 2) but is labeled GB - Gigabytes (base 10). +#1399331 MAAS leaking sensitive information in ps ax output +#1400849 Check Power State disappears after upgrade to 1.7 bzr 3312 +#1401241 custom dd-tgz format images looked for in wrong path, so they don't work +#1401983 Exception: deadlock detected +#1403609 can not enlist chassis with maas admin node-group probe-and-enlist-mscm +#1283106 MAAS allows the same subnet to be defined on two managed interfaces of the same cluster +#1303925 commissioning fails silently if a node can't reach the region controller +#1357073 power state changes are not reflected quickly enough in the UI +#1360280 boot-source-selections api allows adding bogus and duplicated values +#1368400 Can't power off nodes that are in Ready state but on +#1370897 The node power monitoring service does not check nodes in parallel +#1376024 gpg --batch [...]` error caused by race in BootSourceCacheService +#1376716 AMT NUC stuck at boot prompt instead of powering down (no ACPI support in syslinux poweroff) +#1378835 Config does not have a unique index on name +#1379370 Consider removing transaction in claim_static_ip_addresses(). 
+#1379556 Panicky log warning that is irrelevant +#1381444 Misleading error message in log "Unknown power_type 'sm15k'" +#1382166 Message disclosing image import necessary visible while not logged in +#1382237 UnicodeEncodeError when unable to create host maps +#1383231 Error message when trying to reserve the same static IP twice is unhelpful +#1383237 Error message trying to reserve an IP address when no static range is defined is misleading +#1384424 Seamicro Machines do not have Power Status Tracking +#1384428 HP Moonshot Chassis Manager lacks power status monitoring +#1384924 need to provide a better upgrade message for images on the cluster but not on the region +#1386517 DHCP leases are not released at the end of commissioning and possibly enlistment +#1387239 MAAS does not provide an API for reserving a static IP for a given MAC address +#1387414 Race when registering new event type +#1388033 Trying to reserve a static IP when no more IPs are available results in 503 Service Unavailable with no error text +#1389602 Inconsistent behavior in the checks to delete a node +#1389733 node listing does not update the status and power of nodes +#1390144 Node 'releasing' should have a timeout +#1391193 API error documentation +#1391421 Names of custom boot-resources not visible in the web UI +#1391891 Spurious test failure: TestDNSForwardZoneConfig_GetGenerateDirectives.test_returns_single_entry_for_tiny_network +#1393423 PowerKVM / VIrsh import should allow you to specify a prefix to filter VM's to import +#1393953 dd-format images fail to deploy +#1400909 Networks are being autocreated like eth0-eth0 instead of maas-eth0 +#1401349 Memory size changes to incorrect size when page is refreshed +#1402237 Node event log queries are slow (over 1 second) +#1402243 Nodes in 'Broken' state are being power queried constantly +#1402736 clicking on zone link from node page - requested URL was not found on this server +#1403043 Wrong top-level tab is selected when viewing a 
node +#1381609 Misleading log message when a node has a MAC address not attached to a cluster interface +#1386909 Misleading Error: Unable to identify boot image for (ubuntu/amd64/generic/trusty/local): cluster 'maas' does not have matching boot image. +#1388373 Fresh image import of 3 archs displaying multiple rows for armhf and amd64 +#1398159 TFTP into MAAS server to get pxelinux.0 causes unhandled error +#1383651 Node.start() and Node.stop() raise MulltipleFailures unnecessarily +#1383668 null" when releasing an IP address is confusing +#1389416 Power querying for UCSM not working +#1399676 UX bug: mac address on the nodes page should be the MAC address it pxe booted from +#1399736 MAAS should display memory sizes in properly labeld base 2 units - MiB, GiB, etc. +#1401643 Documentation has wrong pattern for user provided preseeds +#1401707 Slow web performance (5+ minute response time) on MAAS with many nodes +#1403609 Fix MSCM chassis enlistment. +#1409952 Correctly parse MAC Address for Power8 VM enlistment. +#1409852 Do not fail when trying to perform an IP Address Reservation. +#1413030 OS and Release no longer populate on Add Node page +#1414036 Trying to add an empty network crashes (AddrFormatError) + + +1.7.0 +===== + +Important announcements +----------------------- + +**Re-import your boot images** + You must re-import your boot images, see below for details. + +**Update Curtin preseed files** + Two changes were made to Curtin preseed files that need your attention + if you made any customisations: + + * The OS name must now appear in the filename. 
The new schema is shown + here, each file pattern is tried in turn until a match is found:: + + {prefix}_{osystem}_{node_arch}_{node_subarch}_{release}_{node_name} + {prefix}_{osystem}_{node_arch}_{node_subarch}_{release} + {prefix}_{osystem}_{node_arch}_{node_subarch} + {prefix}_{osystem}_{node_arch} + {prefix}_{osystem} + {prefix} + + * If you are modifying ``/etc/network/interfaces`` in the preseed, it must be + moved so it is processed last in ``late_commands`` since MAAS now writes + to this file itself as part of IPv6 setup. For example:: + + late_commands: + bonding_02: ["curtin", "in-target", "--", "wget", "-O", "/etc/network/interfaces", "http://[...snip...]"] + + must now look like this:: + + late_commands: + zz_write_ifaces: ["curtin", "in-target", "--", "wget", "-O", "/etc/network/interfaces", "http://[...snip...]"] + + The leading ``zz`` ensures the command sorts to the end of the + ``late_commands`` list. + + +Major new features +------------------ + +**Improved image downloading and reporting.** + MAAS boot images are now downloaded centrally by the region controller + and disseminated to all registered cluster controllers. This change includes + a new web UI under the `Images` tab that allows the admin to select + which images to import and shows the progress of the ongoing download. + This completely replaces any file-based configuration that used to take + place on cluster controllers. The cluster page now shows whether it has + synchronised all the images from the region controller. + + This process is also completely controllable using the API. + +.. Note:: + Unfortunately due to a format change in the way images are stored, it + was not possible to migrate previously downloaded images to the new region + storage. The cluster(s) will still be able to use the existing images, + however the region controller will be unaware of them until an import + is initiated. When the import is finished, the cluster(s) will remove + older image resources. 
+ + This means that the first thing to do after upgrading to 1.7 is go to the + `Images` tab and re-import the images. + +**Increased robustness.** + A large amount of effort has been given to ensuring that MAAS remains + robust in the face of adversity. An updated node state model has been + implemented that takes into account more of the situations in which a + node can be found including any failures at each stage. + + When a node is getting deployed, it is now monitored to check that each + stage is reached in a timely fashion; if it does not then it is marked + as failed. + + The core power driver was updated to check the state of the power on each + node and is reported in the web UI and API. The core driver now also + handles retries when changing the power state of hardware, removing the + requirement that each power template handle it individually. + +**RPC security.** + As a step towards mutually verified TLS connections between MAAS's + components, 1.7 introduces a simple shared-secret mechanism to + authenticate the region with the clusters and vice-versa. For those + clusters that run on the same machine as the region controller (which + will account for most people), everything will continue to work + without intervention. However, if you're running a cluster on a + separate machine, you must install the secret: + + 1. After upgrading the region controller, view /var/lib/maas/secret + (it's text) and copy it. + + 2. On each cluster, run: + + sudo -u maas maas-provision install-shared-secret + + You'll be prompted for the secret; paste it in and press enter. It + is a password prompt, so the secret will not be echoed back to you. + + That's it; the upgraded cluster controller will find the secret + without needing to be told. + +**RPC connections.** + Each cluster maintains a persistent connection to each region + controller process that's running. 
The ports on which the region is + listening are all high-numbered, and they are allocated randomly by + the OS. In a future release of MAAS we will narrow this down. For now, + each cluster controller needs unfiltered access to each machine in the + region on all high-numbered TCP ports. + +**Node event log.** + For every major event on nodes, it is now logged in a node-specific log. + This includes events such as power changes, deployments and any failures. + +**IPv6.** + It is now possible to deploy Ubuntu nodes that have IPv6 enabled. + See :doc:`ipv6` for more details. + +**Removal of Celery and RabbitMQ.** + While Celery was found to be very reliable it ultimately did not suit + the project's requirements as it is a largely fire-and-forget mechanism. + Additionally it was another moving part that caused some headaches for + users and admins alike, so the decision was taken to remove it and implement + a custom communications mechanism between the region controller and cluster + controllers. The new mechanism is bidirectional and allowed the complex + interactions to take place that are required as part of the robustness + improvements. + + Since a constant connection is maintained, as a side effect the web UI now + shows whether each cluster is connected or not. + +**Support for other OSes.** + Non-Ubuntu OSes are fully supported now. This includes: + - Windows + - Centos + - SuSE + +**Custom Images.** + MAAS now supports the deployment of Custom Images. Custom images can be + uploaded via the API. The usage of custom images allows the deployment of + other Ubuntu Flavors, such as Ubuntu Desktop. + +**maas-proxy.** + MAAS now uses maas-proxy as the default proxy solution instead of + squid-deb-proxy. On a fresh install, MAAS will use maas-proxy by default. + On upgrades from previous releases, MAAS will install maas-proxy instead of + squid-deb-proxy. 
+ +Minor notable changes +--------------------- + +**Better handling of networks.** + All networks referred to by cluster interfaces are now automatically + registered on the Network page. Any node network interfaces are + automatically linked to the relevant Network. + +.. Note:: + Commissioning currently requires an IP address to be available for each + network interface on a network that MAAS manages; this allows MAAS to + auto-populate its networks database. In general you should use a + well-sized network (/16 recommended if you will be using containers and + VMs) and dynamic pool. If this feature risks causing IP exhaustion for + your deployment and you do not need the auto-populate functionality, you + can disable it by running the following command on your region controller:: + + sudo maas maas set-config name=enable_dhcp_discovery_on_unconfigured_interfaces value=False + +**Improved logging.** + A total overhaul of where logging is produced was undertaken, and now + all the main events in MAAS are selectively reported to syslog with the + "maas" prefix from both the region and cluster controllers alike. If MAAS + is installed using the standard Ubuntu packaging, its syslog entries are + redirected to /var/log/maas/maas.log. + + On the clusters, pserv.log is now less chatty and contains only errors. + On the region controller appservers, maas-django.log contains only appserver + errors. + +**Static IP selection.** + The API was extended so that specific IPs can be pre-allocated for network + interfaces on nodes and for user-allocated IPs. + +**Pronounceable random hostnames.** + The old auto-generated 5-letter names were replaced with a pseudo-random + name that is produced from a dictionary giving names of the form + 'adjective-noun'. 
+ + +Known Problems & Workarounds +---------------------------- + +**Upgrade issues** + There may be upgrade issues for users currently on MAAS 1.5 and 1.6; while we + have attempted to reproduce and address all the issues reported, some bugs + remain inconclusive. We recommend a full, tested backup of the MAAS servers + before attempting the upgrade to 1.7. If you do encounter issues, please file + these and flag them to the attention of the MAAS team and we will address them + in point-releases. See bugs `1381058`_, `1382266`_, `1379890`_, `1379532`_, + and `1379144`_. + +.. _1381058: + https://launchpad.net/bugs/1381058 +.. _1382266: + https://launchpad.net/bugs/1382266 +.. _1379890: + https://launchpad.net/bugs/1379890 +.. _1379532: + https://launchpad.net/bugs/1379532 +.. _1379144: + https://launchpad.net/bugs/1379144 + +**Split Region/Cluster set-ups** + If you site your cluster on a separate host to the region, it needs a + security key to be manually installed by running + ``maas-provision install-shared-secret`` on the cluster host. + +**Private boot streams** + If you had private boot image stream information configured in MAAS 1.5 or + 1.6, upgrading to 1.7 will not take that into account and it will need to be + manually entered on the settings page in the MAAS UI (bug `1379890`_) + +.. _1379890: + https://launchpad.net/bugs/1379890 + +**Concurrency issues** + Concurrency issues expose us to races when simultaneous operations are + triggered. This is the source of many hard to reproduce issues which will + require us to change the default database isolation level. We intend to address + this in the first point release of 1.7. + +**Destroying a Juju environment** + When attempting to "juju destroy" an environment, nodes must be in the DEPLOYED + state; otherwise, the destroy will fail. You should wait for all in-progress + actions on the MAAS cluster to conclude before issuing the command. (bug + `1381619`_) + +.. 
_1381619: + https://launchpad.net/bugs/1381619 + +**AMT power control** + A few AMT-related issues remain, with workarounds: + + * Commissioning NUC reboots instead of shutting down (bug `1368685`_). There + is `a workaround in the power template`_ + + * MAAS (amttool) cannot control AMT version > 8. See `workaround described in + bug 1331214`_ + + * AMT NUC stuck at boot prompt instead of powering down (no ACPI support in + syslinux poweroff) (bug `1376716`_). See the `ACPI-only workaround`_ + +.. _1368685: + https://bugs.launchpad.net/maas/+bug/1368685 +.. _a workaround in the power template: + https://bugs.launchpad.net/maas/+bug/1368685/comments/8 +.. _workaround described in bug 1331214: + https://bugs.launchpad.net/maas/+bug/1331214/comments/18 +.. _1376716: + https://bugs.launchpad.net/maas/+bug/1376716 +.. _ACPI-only workaround: + https://bugs.launchpad.net/maas/+bug/1376716/comments/12 + + +**Disk wiping** + If you enable disk wiping, juju destroy-environment may fail for you. The + current workaround is to wait and re-issue the command. This will be fixed in + future versions of MAAS & Juju. (bug `1386327`_) + +.. _1386327: + https://bugs.launchpad.net/maas/+bug/1386327 + +**BIND with DNSSEC** + If you are using BIND with a forwarder that uses DNSSEC and have not + configured certificates, you will need to explicitly disable that feature in + your BIND configuration (1384334) + +.. _1384334: + https://bugs.launchpad.net/maas/+bug/1384334 + +**Boot source selections on the API** + Use of API to change image selections can leave DB in a bad state + (bug `1376812`_). It can be fixed by issuing direct database updates. + +.. _1376812: + https://bugs.launchpad.net/maas/+bug/1376812 + +**Disabling DNS** + Disabling DNS may not work (bug `1383768`_) + +.. _1383768: + https://bugs.launchpad.net/maas/+bug/1383768 + +**Stale DNS zone files** + Stale DNS zone files may be left behind if the MAAS domainname is changed + (bug `1383329`_) + +.. 
_1383329: + https://bugs.launchpad.net/maas/+bug/1383329 + + + +Major bugs fixed in this release +-------------------------------- + +See https://launchpad.net/maas/+milestone/1.7.0 for full details. + +#1081660 If maas-enlist fails to reach a DNS server, the node will be named ";; connection timed out; no servers could be reached" + +#1087183 MaaS cloud-init configuration specifies 'manage_etc_hosts: localhost' + +#1328351 ConstipationError: When the cluster runs the "import boot images" task it blocks other tasks + +#1342117 CLI command to set up node-group-interface fails with /usr/lib/python2.7/dist-packages/maascli/__main__.py: error: u'name' + +#1349254 Duplicate FQDN can be configured on MAAS via CLI or API + +#1352575 BMC password showing in the apache2 logs + +#1355534 UnknownPowerType traceback in appserver log + +#1363850 Auto-enlistment not reporting power parameters + +#1363900 Dev server errors while trying to write to '/var/lib/maas' + +#1363999 Not assigning static IP addresses + +#1364481 http 500 error doesn't contain a stack trace + +#1364993 500 error when trying to acquire a commissioned node (AddrFormatError: failed to detect a valid IP address from None) + +#1365130 django-admin prints spurious messages to stdout, breaking scripts + +#1365850 DHCP scan using cluster interface name as network interface? + +#1366172 NUC does not boot after power off/power on + +#1366212 Large dhcp leases file leads to tftp timeouts + +#1366652 Leaking temporary directories + +#1368269 internal server error when deleting a node + +#1368590 Power actions are not serialized. + +#1370534 Recurrent update of the power state of nodes crashes if the connection to the BMC fails. 
+ +#1370958 excessive pserv logging + +#1372767 Twisted web client does not support IPv6 address + +#1372944 Twisted web client fails looking up IPv6 address hostname + +#1373031 Cannot register cluster + +#1373103 compose_curtin_network_preseed breaks installation of all other operating systems + +#1373368 Conflicting power actions being dropped on the floor can result in leaving a node in an inconsistent state + +#1373699 Cluster Listing Page lacks feedback about the images each cluster has + +#1374102 No retries for AMT power? + +#1375980 Nodes failed to transition out of "New" state on bulk commission + +#1376023 After performing bulk action on maas nodes, Internal Server Error + +#1376888 Nodes can't be deleted if DHCP management is off. + +#1377099 Bulk operation leaves nodes in inconsistent state + +#1379209 When a node has multiple interfaces on a network MAAS manages, MAAS assigns static IP addresses to all of them + +#1379744 Cluster registration is fragile and insecure + +#1380932 MAAS does not cope with changes of the dhcp daemons + +#1381605 Not all the DNS records are being added when deploying multiple nodes + +#1012954 If a power script fails, there is no UI feedback + +#1186196 "Starting a node" has different meanings in the UI and in the API. 
+ +#1237215 maas and curtin do not indicate failure reasonably + +#1273222 MAAS doesn't check return values of power actions + +#1288502 archive and proxy settings not honoured for commissioning + +#1316919 Checks don't exist to confirm a node will actually boot + +#1321885 IPMI detection and automatic setting fail in ubuntu 14.04 maas + +#1325610 node marked "Ready" before poweroff complete + +#1325638 Add hardware enablement for Universal Management Gateway + +#1340188 unallocated node started manually, causes AssertionError for purpose poweroff + +#1341118 No feedback when IPMI credentials fail + +#1341121 No feedback to user when cluster is not running + +#1341581 power state is not represented in api and ui + +#1341800 MAAS doesn't support soft power off through the API + +#1344177 hostnames can't be changed while a node is acquired + +#1347518 Confusing error message when API key is wrong + +#1349496 Unable to request a specific static IP on the API + +#1349736 MAAS logging is too verbose and not very useful + +#1349917 guess_server_address() can return IPAddress or hostname + +#1350103 No support for armhf/keystone architecture + +#1350856 Can't constrain acquisition of nodes by not having a tag + +#1356880 MAAS shouldn't allow changing the hostname of a deployed node + +#1357714 Virsh power driver does not seem to work at all + +#1358859 Commissioning output xml is hard to understand, would be nice to have yaml as an output option. 
+ +#1359169 MAAS should handle invalid consumers gracefully + +#1359822 Gateway is missing in network definition + +#1363913 Impossible to remove last MAC from network in UI + +#1364228 Help text for node hostname is wrong + +#1364591 MAAS Archive Mirror does not respect non-default port + +#1365616 Non-admin access to cluster controller config + +#1365619 DNS should be an optional field in the network definition + +#1365776 commissioning results view for a node also shows installation results + +#1366812 Old boot resources are not being removed on clusters + +#1367455 MAC address for node's IPMI is reversed looked up to yield IP address using case sensitive comparison + +#1373580 [SRU] Glen m700 cartridge list as ARM64/generic after enlist + +#1373723 Releasing a node without power parameters ends up in not being able to release a node + +#1233158 no way to get power parameters in api + +#1319854 `maas login` tells you you're logged in successfully when you're not + +#1368480 Need API to gather image metadata across all of MAAS + +#1281406 Disk/memory space on Node edit page have no units + +#1299231 MAAS DHCP/DNS can't manage more than a /16 network + +#1357381 maas-region-admin createadmin shows error if not params given + +#1376393 powerkvm boot loader installs even when not needed + +#1287224 MAAS random generated hostnames are not pronounceable + +#1348364 non-maas managed subnets cannot query maas DNS + + +1.6.1 ===== Bug fix update -------------- - - Package fails to install when the default route is through an - aliased/tagged interface (LP: #1350235) - - ERROR Nonce already used (LP: #1190986) - - Add MAAS arm64/xgene support (LP: #1338851) - - Add utopic support (LP: #1337437) - - API documentation for nodegroup op=details missing parameter - (LP: #1331982) +- Auto-link node MACs to Networks (LP: #1341619) + MAAS will now auto-create a Network from a cluster interface, and + if an active lease exists for a node's MAC then it will be linked to + that 
Network. + + +1.6.0 +===== + +Special notice: + Cluster interfaces now have static IP ranges in order to give nodes stable + IP addresses. You need to set the range in each interface to turn on this + feature. See below for details. + +Major new features +------------------ + +IP addresses overhaul. + This release contains a total reworking of IP address allocation. You can + now define a separate "static" range in each cluster interface configuration + that is separate from the DHCP server's dynamic range. Any node in use by + a user will receive an IP address from the static range that is guaranteed + not to change during its allocated lifetime. Previously, this was at the + whim of the DHCP server despite MAAS placing host maps in its configuration. + + Currently, dynamic IP addresses will continue to receive DNS entries so as + to maintain backward compatibility with installations being upgraded from + 1.5. However, this will be changed in a future release to only give + DNS entries to static IPs. + + You can also use the API to `reserve IP addresses`_ on a per-user basis. + +.. _reserve IP addresses: http://maas.ubuntu.com/docs1.6/api.html#ip-addresses + +Support for additional OSes. + MAAS can now install operating systems other than Ubuntu on nodes. + Preliminary beta support exists for CentOS and SuSE via the `Curtin`_ "fast" + installer. This has not been thoroughly tested yet and has been provided + in case anyone finds this useful and is willing to help find and report bugs. + + +Minor notable changes +--------------------- + +DNS entries + In 1.5 DNS entries for nodes were a CNAME record. As of 1.6, they are now + all "A" records, which allows for reliable reverse look-ups. + + Only nodes that are allocated to a user and started will receive "A" record + entries. Unallocated nodes no longer have DNS entries. 
+ +Removal of bootresources.yaml + The bootresources.yaml file, which had to be configured separately on each + cluster controller, is no longer in use. Instead, the configuration for + which images to download is now held by the region controller, and defaults + to downloading all images for LTS releases. A `rudimentary API`_ is + available to manipulate this configuration. + +.. _rudimentary API: http://maas.ubuntu.com/docs1.6/api.html#boot-source + +Fast installer is now the default + Previously, the slower Debian installer was used by default. Any newly- + enlisted nodes will now use the newer `fast installer`_. Existing nodes + will keep the installer setting that they already have. + +.. _fast installer: https://launchpad.net/curtin + + +Bugs fixed in this release +-------------------------- +#1307779 fallback from specific to generic subarch broken +#1310082 d-i with precise+hwe-s stops at "Architecture not supported" +#1314174 Autodetection of the IPMI IP address fails when the 'power_address' +of the power parameters is empty. +#1314267 MAAS dhcpd will re-issue leases for nodes +#1317675 Exception powering down a virsh machine +#1322256 Import boot resources failing to verify keyring +#1322336 import_boot_images crashes with KeyError on 'keyring' +#1322606 maas-import-pxe-files fails when run from the command line +#1324237 call_and_check does not report error output +#1328659 import_boot_images task fails on utopic +#1332596 AddrFormatError: failed to detect a valid IP address from None executing upload_dhcp_leases task +#1250370 "sudo maas-import-ephemerals" steps on ~/.gnupg/pubring.gpg +#1250435 CNAME record leaks into juju's private-address, breaks host based access control +#1305758 Import fails while writing maas.meta: No such file or directory +#1308292 Unhelpful error when re-enlisting a previously enlisted node +#1309601 maas-enlist prints "successfully enlisted" even when enlistment fail +s. 
+#1309729 Fast path installer is not the default +#1310844 find_ip_via_arp() results in unpredictable, and in some cases, incorrect IP addresses +#1310846 amt template gives up way too easily +#1312863 MAAS fails to detect SuperMicro-based server's power type +#1314536 Copyright date in web UI is 2012 +#1315160 no support for different operating systems +#1316627 API needed to allocate and return an extra IP for a container +#1323291 Can't re-commission a commissioning node +#1324268 maas-cli 'nodes list' or 'node read ' doesn't display the osystem or distro_series node fields +#1325093 install centos using curtin +#1325927 YUI.Array.each not working as expected +#1328656 MAAS sends multiple stop_dhcp_server tasks even though there's no dhcp server running. +#1331139 IP is inconsistently capitalized on the 'edit a cluster interface' p +age +#1331148 When editing a cluster interface, last 3 fields are unintuitive +#1331165 Please do not hardcode the IP address of Canonical services into MAAS managed DHCP configs +#1338851 Add MAAS arm64/xgene support +#1307693 Enlisting a SeaMicro or Virsh chassis twice will not replace the missing entries +#1311726 No documentation about the supported power types and the related power parameters +#1331982 API documentation for nodegroup op=details missing parameter +#1274085 error when maas can't meet juju constraints is confusing and not helpful +#1330778 MAAS needs support for managing nodes via the Moonshot HP iLO Chassis Manager CLI +#1337683 The API client MAASClient doesn't encode list parameters when doing a GET +#1190986 ERROR Nonce already used +#1342135 Allow domains to be used for NTP server configuration, not just IPs +#1337437 Allow 14.10 Utopic Unicorn as a deployable series +#1350235 Package fails to install when the default route is through an aliased/tagged interface +#1353597 PowerNV: format_bootif should make sure mac address is all lowercase 1.5.3 ===== @@ -36,22 +784,17 @@ 1.5.2 ===== -Bug fix update 
--------------- +Minor feature changes +--------------------- -- Remove workaround for fixed Django bug 1311433 (LP: #1311433) -- Ensure that validation errors are returned when adding a node over - the API and its cluster controller is not contactable. (LP: #1305061) -- Hardware enablement support for PowerKVM -- Shorten the time taken for a cluster to initially connect to the region - via RPC to around 2 seconds (LP: #1317682) -- Faster DHCP leases parser (LP: #1305102) -- Documentation fixed explaining how to enable an ephemeral backdoor - (LP: #1321696) -- Use probe-and-enlist-hardware to enlist all virtual machine inside - a libvirt machine, allow password qemu+ssh connections. - (LP: #1315155, LP: #1315157) -- Rename ppc64el boot loader to PowerKVM (LP: #1315154) +Boot resource download changes. + Further to the work done in the 1.5 (Ubuntu 14.04) release, MAAS no + longer stores the configuration for downloading boot resources in + ``/etc/maas/bootresources.yaml``; this file is now obsolete. The + sources list is now stored on the region controller and passed to the + cluster controller when the job to download boot resources is started. + It is still possible to pass a list of sources to + ``maas-import-pxe-files`` when running the script manually. 1.5.1 @@ -175,7 +918,7 @@ #1255479 MaaS Internal Server Error 500 while parsing tags with namespaces in definition upon commissioning -#1269648 OAuth unauthorized errors mask the actual error text +#1269648 OAuth unauthorised errors mask the actual error text #1270052 Adding an SSH key fails due to a UnicodeDecodeError @@ -241,7 +984,7 @@ #1237197 No scheduled job for images download -#1238284 mutiple ip address displayed for a node +#1238284 multiple ip address displayed for a node #1243917 'maas createsuperuser' errors out if no email address is entered. 
@@ -269,7 +1012,7 @@ #1274465 Network identity shows broadcast address instead of the network's address -#1274499 dhcp lease rollover causes loss of access to managment IP +#1274499 dhcp lease rollover causes loss of access to management IP #1275643 When both IPMI 1.5 and 2.0 are available, MAAS should use 2.0 diff -Nru maas-1.5.4+bzr2294/docs/cluster-configuration.rst maas-1.7.6+bzr3376/docs/cluster-configuration.rst --- maas-1.5.4+bzr2294/docs/cluster-configuration.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/cluster-configuration.rst 2015-07-10 01:27:14.000000000 +0000 @@ -1,3 +1,7 @@ +.. -*- mode: rst -*- + +.. _cluster-configuration: + Cluster Configuration ===================== @@ -36,7 +40,7 @@ are being specially routed between the nodes' subnet and MAAS's DHCP server. 2. The cluster controller must be able to find nodes' IP addresses based on their MAC addresses, by inspecting its ARP cache. This implies that the - nodes and the clsuter controler must on the same physical subnet. + nodes and the cluster controller must be on the same physical subnet. Cluster acceptance @@ -73,13 +77,18 @@ MAAS automatically recognises the network interfaces on each cluster controller. Some (though not necessarily all) of these will be connected to -networks where you want to manage nodes. We recommend letting your cluster -controller act as a DHCP server for these networks, by configuring those -interfaces in the MAAS user interface. +networks where you want to manage nodes. A connection between a cluster +controller and a network is called a `cluster interface`. Each cluster +interface is built on exactly one network interface, though it's possible for +two cluster interfaces to use the same network interface card. + +We recommend letting your cluster controller act as a DHCP server for the +networks it manages, by configuring the corresponding cluster interfaces in +the MAAS user interface. 
As an example, we will configure the cluster controller to manage a network -on interface ``eth0``. Click on the edit icon for ``eth0``, which takes us -to this page: +on interface ``eth0``. Click on the edit icon for the cluster interface on +network interface ``eth0``, which takes us to this page: .. image:: media/cluster-interface-edit.png @@ -91,11 +100,6 @@ the DNS server included with the region controller so that it can be used to look up hosts on this network by name. -.. note:: - You cannot have DNS management without DHCP management because MAAS relies on - its own DHCP server's leases file to work out the IP address of nodes in the - cluster. - If you set the interface to be managed, you now need to provide all of the usual DHCP details in the input fields below. Once done, click "Save interface". The cluster controller will now be able to boot nodes on this @@ -106,11 +110,37 @@ manage nodes but don't want the cluster controller to serve DHCP, you may be able to get by without it. This is explained in :ref:`manual-dhcp`. +.. _static-ip-address: + +Static vs Dynamic IP Addresses +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +On the cluster interface edit page, there are fields to enter both a dynamic +and a static range of IP addresses. It is mandatory to enter the dynamic range +if you are managing DHCP on this interface, but the static range is optional. + +Dynamic addresses are given to both unknown devices booting on this network, +and Nodes that are commissioning. Dynamic addresses are allocated by the +DHCP server and may change at any time. + +Static addresses are given to Nodes when they are allocated to a user and +started up, and returned to the pool only when the Node is de-allocated. +Static addresses are allocated by MAAS, and are guaranteed not to change while +allocated. If you are managing DNS on this network, only static IP addresses +are given DNS entries with the Node's name. 
+ +If you do not configure the static range, then nodes will only get dynamic +IP addresses and will never get a DNS entry. + +IP addresses in the static range are also available for reservation by users +using the :doc:`api`. This prevents MAAS from allocating the reserved +IP to any Nodes or other devices, which allows users to assign it freely +to their own hosts/devices on the same network, such as LXC containers. + Multiple networks ----------------- A single cluster controller can manage more than one network, each from a -different network interface on the cluster-controller server. This may help -you scale your cluster to larger numbers of nodes, or it may be a requirement -of your network architecture. +different cluster interface. This may help you scale your cluster to larger +numbers of nodes, or it may be a requirement of your network architecture. diff -Nru maas-1.5.4+bzr2294/docs/configure.rst maas-1.7.6+bzr3376/docs/configure.rst --- maas-1.5.4+bzr2294/docs/configure.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/configure.rst 2015-07-10 01:27:14.000000000 +0000 @@ -13,6 +13,10 @@ existing DHCP server for the network will need its configuration altered to allow MAAS to enlist and control nodes automatically. +.. note:: + If you don't let MAAS manage DHCP, then MAAS will not be able to allocate + its :ref:`static IP addresses ` to Nodes. + At the very least the "filename" option should be set to "pxelinux.0". How to configure this depends on what software you use as a DHCP server. If diff -Nru maas-1.5.4+bzr2294/docs/conf.py maas-1.7.6+bzr3376/docs/conf.py --- maas-1.5.4+bzr2294/docs/conf.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/conf.py 2015-07-10 01:27:14.000000000 +0000 @@ -13,14 +13,20 @@ # Import maas' settings. 
from os import environ + + environ.setdefault("DJANGO_SETTINGS_MODULE", "maas.settings") # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) -# -- General configuration ----------------------------------------------------- +import sys, os +# Include '.' in the path so that our custom extension, 'versions', can +# be found. +sys.path.insert(0, os.path.abspath('.')) + +# -- Multiple documentation options. # Add a widget to switch between different versions of the documentation to # each generated page. @@ -39,6 +45,24 @@ # version switcher widget. versions_path = '_static/versions.js' +# Versions to include in the version switcher. +# Note that the version switcher fetches the list of the documentation versions +# from the list published by the trunk documentation (i.e. in '//'). +# This means the following list is meaningful only for trunk. +# The first item should be the development version. +from collections import OrderedDict +doc_versions = OrderedDict([ + ('dev', 'Development trunk'), + ('1.7', 'MAAS 1.7'), + ('1.6', 'MAAS 1.6'), + ('1.5', 'MAAS 1.5'), + ('1.4', 'MAAS 1.4'), + ('1.3', 'MAAS 1.3'), + ('1.2', 'MAAS 1.2'), +]) + +# -- General configuration ----------------------------------------------------- + # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' @@ -51,6 +75,7 @@ 'sphinx.ext.intersphinx', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode', + 'versions', ] # Add any paths that contain templates here, relative to this directory. @@ -74,9 +99,9 @@ # built documents. # # The short X.Y version. -version = '1.5' +version = '1.7' # The full version, including alpha/beta/rc tags. -release = '1.5' +release = version # The language for content autogenerated by Sphinx. 
Refer to documentation # for a list of supported languages. @@ -273,4 +298,3 @@ 'bzr_last_revision_number': bzr_last_revision_number, 'bzr_build_date': bzr_build_date, } - diff -Nru maas-1.5.4+bzr2294/docs/development/cluster-bootstrap.rst maas-1.7.6+bzr3376/docs/development/cluster-bootstrap.rst --- maas-1.5.4+bzr2294/docs/development/cluster-bootstrap.rst 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/development/cluster-bootstrap.rst 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,81 @@ +Bootstrapping a cluster +======================= + + +Considerations +-------------- + +A new cluster needs to register itself with the region. At the same +moment that it's accepted by the region, the region starts configuring +it via RPC, so we need an RPC connection open when registering. + +Before a cluster is accepted, we want to restrict the available RPC +calls to a small set, both on the region and the cluster. + +Before a cluster is accepted, we also do not want to start some services +on the cluster, like lease uploads, DHCP scanning, and so forth, because +the region will reject interaction from them. + + +Start-up procedure +------------------ + +This procedure will be followed by existing clusters and new clusters +alike: + +#. Cluster starts. + +#. If shared secret not available, shutdown, **DONE**. + +#. ``ClusterClientService`` starts. + +#. Services other than ``log`` are **not** started. + +#. Wait for a connection to the region to become available. + +#. Do not allow any RPC calls other than ``Identify`` and ``Authenticate``. + +#. Call ``Identify``. + +#. Call ``Authenticate``. + + - On success, continue. + + - On failure, shutdown, **DONE**. + +#. Permit all other RPC calls. + + - This allows for side-effects from calling ``Register`` next, like DHCP + configuration. + +#. Call ``Register``. Region accepts cluster. + +#. Start all services. + +#. **DONE**. + + +Work items +---------- + +#. **DONE:** Add ``Authenticate`` RPC call. + +#. 
**DONE:** Add ``Register`` RPC call. + +#. **DONE:** Command-line to install shared-secret. + +#. **DONE:** Check for shared-secret during start-up (packaging change too?). + +#. **DONE:** Perform ``Authenticate`` handshake. + +#. **DONE:** Perform ``Register`` handshake. + +#. **DONE:** Pass MAAS_URL in ``Register`` call. This replicates functionality + found in ``update_nodegroup_maas_url``, which is no longer used. + +#. Display secret to admins in UI, or provide tool to obtain secret + locally on region controller's machine. + +#. Mechanism to limit available RPC calls. + +#. Mechanism to defer start-up of "full" services. diff -Nru maas-1.5.4+bzr2294/docs/development/cluster-registration.rst maas-1.7.6+bzr3376/docs/development/cluster-registration.rst --- maas-1.5.4+bzr2294/docs/development/cluster-registration.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/development/cluster-registration.rst 2015-07-10 01:27:14.000000000 +0000 @@ -34,8 +34,7 @@ CLUSTER_UUID="adfd3977-f251-4f2c-8d61-745dbd690bf2" The values here are the defaults in the development environment. MAAS_URL -tells the cluster controller where to find the region controller, and is -sourced as a shell script by ``services/cluster-worker/run``. +tells the cluster controller where to find the region controller. ``CLUSTER_UUID`` is what the region uses to tell clusters apart when they connect. Each cluster is free to generate its own UUID but the development diff -Nru maas-1.5.4+bzr2294/docs/development/lease-scanning-and-dns.rst maas-1.7.6+bzr3376/docs/development/lease-scanning-and-dns.rst --- maas-1.5.4+bzr2294/docs/development/lease-scanning-and-dns.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/development/lease-scanning-and-dns.rst 2015-07-10 01:27:14.000000000 +0000 @@ -20,16 +20,10 @@ =============== MAAS will periodically scan the DHCP leases file using the -``upload_dhcp_leases()`` celery task, which originates via celerybeat. 
+``LeaseUploadService()`` pserv service. -As leases are discovered, it calls the api function ``update_leases()`` which -will subsequently generate a new task called ``add_new_dhcp_host_map()`` that -will use omshell to write a permanent mapping from MAC to IP in the DHCP -server. - -That host map remains until the node is deleted, at which point the -``remove_dhcp_host_map()`` task is invoked which uses omshell to remove the -map. +As leases are discovered, it calls the RPC function ``UpdateLeases`` which +stores the active leases in the DHCPLease table. Updating the DNS zone file diff -Nru maas-1.5.4+bzr2294/docs/development/metadata.rst maas-1.7.6+bzr3376/docs/development/metadata.rst --- maas-1.5.4+bzr2294/docs/development/metadata.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/development/metadata.rst 2015-07-10 01:27:14.000000000 +0000 @@ -118,7 +118,7 @@ passed as a multipart form item called "op". Other parameters are passed in the same way. -The ``signal`` call notifies the region controler of the state of a +The ``signal`` call notifies the region controller of the state of a commissioning node. The node sends running updates, as well as output produced by the commissioning scripts, and finally completion information through this call. diff -Nru maas-1.5.4+bzr2294/docs/development/preseeds.rst maas-1.7.6+bzr3376/docs/development/preseeds.rst --- maas-1.5.4+bzr2294/docs/development/preseeds.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/development/preseeds.rst 2015-07-10 01:27:14.000000000 +0000 @@ -5,51 +5,52 @@ ========================= A preseed is what MAAS sends to the ``cloud-init`` process that starts up -as part of node installation and commissioning. It is a specially formatted -chunk of text. MAAS does not care what that formatting is, it just knows -it is text and uses Tempita_ templates to render a final file based on -some variables that are required depending on the context. 
+as part of node enlistment, commissioning and installation. It is a +specially formatted chunk of text. MAAS does not care what that formatting +is, it just knows it is text and uses Tempita_ templates to render a final +file based on variables that are required depending on the context. .. _Tempita: http://pythonpaste.org/tempita/ -Preseed template structure --------------------------- - -The preseed templates live in the source tree at ``contrib/preseeds_v2/`` -and the following files exist there:: - - commissioning enlist_userdata preseed_master - enlist generic preseed_xinstall - -The file that is used as a template for the preseed depends on the state -of the node that is being booted up. - -+--------------+----------------------------+ -|State | Template used | -+==============+============================+ -|Enlistment |``enlist`` | -+--------------+----------------------------+ -|Commissioning |``commissioning`` | -+--------------+----------------------------+ -|Installation |- ``generic``, plus one of: | -| |- ``preseed_master`` or | -| |- ``preseed_xinstall`` | -+--------------+----------------------------+ +Preseed templates +----------------- -The ``enlist_userdata`` is not a preseed but it is a template that's used -to send data to ``cloud-init`` when a new node is enlisted. +On a live system, the preseed templates live in ``/etc/maas/preseeds/``. -There are also `User-provided preseeds`_, see below. +Each template uses a prefix that corresponds to a particular "phase": ++---------------+--------------------------+ +| Phase | Prefix used | ++===============+==========================+ +| Enlistment | enlist | ++---------------+--------------------------+ +| Commissioning | commissioning | ++---------------+--------------------------+ +| Installation | preseed_master (DI_) or | +| | curtin (Curtin_) | ++---------------+--------------------------+ + +.. _DI: https://www.debian.org/devel/debian-installer/ + +.. 
_Curtin: https://launchpad.net/curtin + +Note that the preseed information is in fact composed of two files: the +preseed file per se which usually contains little more than a URL and the +credentials where to get the "user data" which, in turn, contains most of +the logic to be executed. Installation preseed -------------------- The installation preseed is broken into the three files because the requirements are different depending on the installer being used. The -Debian Installer uses ``preseed_master`` and the newer Curtin installer -uses ``preseed_xinstall``. +`Debian Installer`_ uses ``preseed_master`` and the newer Curtin_ installer +uses ``curtin_userdata``. + +.. _Debian Installer: https://www.debian.org/devel/debian-installer/ + +.. _Curtin: https://launchpad.net/curtin The base of both of these is ``generic``, which defines some variables and then selects the right one depending on the installer being used. Note @@ -63,45 +64,86 @@ ---------------------- In addition to the standard preseed files, the base preseeds can be -overridden by end users on a per-architecture, OS release and node name basis. -The templates are looked up in the following order:: +overridden on a per-OS, architecture, subarchitecture, OS release and +node name basis. The templates are looked up in the following order:: - {prefix}_{node_architecture}_{node_subarchitecture}_{release}_{node_name} - {prefix}_{node_architecture}_{node_subarchitecture}_{release} - {prefix}_{node_architecture}_{node_subarchitecture} - {prefix}_{node_architecture} + {prefix}_{osystem}_{node_arch}_{node_subarch}_{release}_{node_name} + {prefix}_{osystem}_{node_arch}_{node_subarch}_{release} + {prefix}_{osystem}_{node_arch}_{node_subarch} + {prefix}_{osystem}_{node_arch} + {prefix}_{osystem} {prefix} 'generic' -``prefix`` is either empty or one of ``enlist`` or ``commissioning``. 
+ Note: in order to be backward-compatible with earlier versions of MAAS that + only supported the Ubuntu OS, if the node OS is Ubuntu paths without the + {osystem} are also tried: + {prefix}_{osystem}_{node_arch}_{node_subarch}_{release}_{node_name} + {prefix}_{node_arch}_{node_subarch}_{release}_{node_name} + {prefix}_{osystem}_{node_arch}_{node_subarch}_{release} + {prefix}_{node_arch}_{node_subarch}_{release} + {prefix}_{osystem}_{node_arch}_{node_subarch} + {prefix}_{node_arch}_{node_subarch} + {prefix}_{osystem}_{node_arch} + {prefix}_{node_arch} + {prefix}_{osystem} + {prefix} + 'generic' + +``prefix`` is either empty (in which case the following underscore is also +omitted: e.g. {osystem}_{node_arch}_{node_subarch}_{release}) or one of +``enlist``, ``enlist_userdata``, ``commissioning``, ``curtin``, +``curtin_userdata`` or ``preseed_master``. As you can see this mechanism is also used to calculate the base preseeds for all of installation, enlistment and commissioning. It allows end users to -add, for example, a file named ``amd64_generic_saucy`` that would be used -instead of the ``generic`` template at installation time. +add, for example, a file named ``curtin_ubuntu_amd64_generic`` that would be +used at installation time. -Context variables ------------------ +Curtin configuration +-------------------- + +Curtin_ is the tool responsible for performing the OS installation. If you +need to customize the installation, you need to change Curtin's user data +(by either changing the existing ``curtin_userdata`` file or adding a custom +version as described above). + +.. _Curtin: https://launchpad.net/curtin + +There isn't complete documentation on how to customize Curtin at the time of +this writing but the following instructions and examples should cover most of +the use cases. + +Curtin provides hooks to execute custom code before (`early`) or after (`late`) +the installation takes place. 
You can override these hooks to execute code, +either code that will run in the ephemeral environment or in the machine being +installed itself (`in-target`). Note that you can execute `in-target` code +only in a `late` command. + +Example: early command +====================== + +Here is an example of an early command (i.e. one that will run before the +installation takes place) that runs in the ephemeral environment and +pings an external machine to signal that the installation is about to start. + +.. code:: yaml + + early_commands: + signal: [wget, '--no-proxy', 'http://example.com/', '--post-data', 'system_id={{node.system_id}}&signal=starting_install', '-O', '/dev/null'] + +Example: late command +====================== + +Here is an example of two late commands (i.e. commands that will run after the +installation has been performed). Both run `in-target` (i.e. in the machine +being installed). The first command adds a PPA to the machine. The second +command create a file containing the node's system_id. (Note that these are +just examples of the things that can be done.) + +.. code:: yaml -Most of the context variables comes from code in ``src/maasserver/preseed.py`` -in the ``get_preseed_context()`` function. 
However there are many small -functions in that file, this is the full call tree:: - - get_preseed() - | - render_preseed() - | - load_preseed_template() - | | - | get_preseed_filenames() - | | - | get_preseed_template() - | - get_preseed_context() - | - get_node_preseed_context() - | | - | compose_preseed() - (comes from src/maasserver/compose_preseed.py) - | - template.substitute(**context) + late_commands: + add_repo: ["curtin", "in-target", "--", "add-apt-repository", "-y", "ppa:my/ppa"] + custom: curtin in-target -- sh -c "/bin/echo -en 'Installed {{node.system_id}}' > /tmp/maas_system_id" diff -Nru maas-1.5.4+bzr2294/docs/development/rpc.rst maas-1.7.6+bzr3376/docs/development/rpc.rst --- maas-1.5.4+bzr2294/docs/development/rpc.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/development/rpc.rst 2015-07-10 01:27:14.000000000 +0000 @@ -55,7 +55,7 @@ names used in the command's ``arguments`` declaration. * They *must* return a dict that matches the command's ``response`` - declararation. + declaration. * If the ``response`` declaration is empty they *must* still return an empty dict. diff -Nru maas-1.5.4+bzr2294/docs/development/tagging.rst maas-1.7.6+bzr3376/docs/development/tagging.rst --- maas-1.5.4+bzr2294/docs/development/tagging.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/development/tagging.rst 2015-07-10 01:27:14.000000000 +0000 @@ -64,7 +64,7 @@ XML output, every tag with an expression must be evaluated against the result so that the node is correctly tagged. -To do this, ``maasserver.api.VersionIndexHandler.signal`` calls +To do this, ``VersionIndexHandler.signal`` calls ``populate_tags_for_single_node`` just before saving all the changes. This happens in the **region**. 
While it's a computationally expensive operation, the overhead of spinning this work out to a cluster diff -Nru maas-1.5.4+bzr2294/docs/enum.rst maas-1.7.6+bzr3376/docs/enum.rst --- maas-1.5.4+bzr2294/docs/enum.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/enum.rst 2015-07-10 01:27:14.000000000 +0000 @@ -17,6 +17,3 @@ .. autoclass:: maasserver.enum.NODE_STATUS :members: - -.. autoclass:: maasserver.enum.DISTRO_SERIES - :members: diff -Nru maas-1.5.4+bzr2294/docs/getting-help.rst maas-1.7.6+bzr3376/docs/getting-help.rst --- maas-1.5.4+bzr2294/docs/getting-help.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/getting-help.rst 2015-07-10 01:27:14.000000000 +0000 @@ -45,5 +45,5 @@ This will create a tarball containing MAAS' log files, MAAS' configuration files and a dump of MAAS' database. By default, the tarball will end up in /tmp but you can change the location, see sosreport's manpage for details. If there are things you do not wish to share publicly, feel free to edit the tarball. -Now, the last step is to make this file available by any means at your disposal (openly accessible FTP server, Ubuntu One, Dropbox, etc.) in order for the people who will help you to be able to get their hands on it. +Now, the last step is to make this file available by any means at your disposal (openly accessible FTP server, Dropbox, etc.) in order for the people who will help you to be able to get their hands on it. 
diff -Nru maas-1.5.4+bzr2294/docs/hacking.rst maas-1.7.6+bzr3376/docs/hacking.rst --- maas-1.5.4+bzr2294/docs/hacking.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/hacking.rst 2015-07-10 01:27:14.000000000 +0000 @@ -170,7 +170,7 @@ $ make -Create the database cluster and initialize the development database:: +Create the database cluster and initialise the development database:: $ make syncdb @@ -225,7 +225,7 @@ First create a superuser and start all MAAS services:: - $ bin/maas-region-admin createsuperuser + $ bin/maas-region-admin createadmin $ make run Substitute your own email. The command will prompt for a choice of password. @@ -292,14 +292,14 @@ Running the cluster worker ^^^^^^^^^^^^^^^^^^^^^^^^^^ -The cluster worker process also needs authbind as it needs to bind a socket -on UDP port 68 for DHCP probing:: +The cluster also needs authbind as it needs to bind a socket on UDP port +68 for DHCP probing:: $ sudo touch /etc/authbind/byport/68 $ sudo chmod a+x /etc/authbind/byport/68 If you omit this, nothing else will break, but you will get an error in -the cluster celeryd log because it can't bind to the port. +the cluster log because it can't bind to the port. Configuring DHCP diff -Nru maas-1.5.4+bzr2294/docs/hardware-enablement-kernels.rst maas-1.7.6+bzr3376/docs/hardware-enablement-kernels.rst --- maas-1.5.4+bzr2294/docs/hardware-enablement-kernels.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/hardware-enablement-kernels.rst 2015-07-10 01:27:14.000000000 +0000 @@ -17,9 +17,9 @@ ------------------------------------- Brand new hardware gets released all the time. We want that hardware to -work well wih Ubuntu and MAAS, even if it was released after the latest -release of MAAS or Ubuntu. Hardware Enablement (HWE) is all about making -keeping pace with the new hardware. +work well with Ubuntu and MAAS, even if it was released after the latest +release of MAAS or Ubuntu. 
Hardware Enablement (HWE) is all about keeping +pace with the new hardware. Ubuntu's solution to this is to offer newer kernels for older releases. There are at least two kernels on offer for Ubuntu releases: the @@ -39,53 +39,26 @@ .. _LTS Enablement Stack: https://wiki.ubuntu.com/Kernel/LTSEnablementStack -Importing hardware-enablement kernels -------------------------------------- - -Hardware-enablement kernels need to be imported to a cluster controller -before that cluster's nodes can use them. -In order to import HWE kernels to a cluster controller you need to edit -the controller's ``/etc/maas/bootresources.yaml`` file, and update the -subarches that you want to import, like this:: - - boot: - storage: "/var/lib/maas/boot-resources/" - - sources: - - path: "http://maas.ubuntu.com/images/ephemeral-v2/releases/" - keyring: "/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg" - selections: - - release: "precise" - arches: ["i386", "amd64"] - subarches: ["generic", "hwe-q", "hwe-r", "hwe-s", "hwe-t"] - labels: ["release"] - -Once you've updated ``bootresources.yaml``, you can tell the cluster to -re-import its boot images using the ``maas`` command (You will need to -:ref:`be logged in to the API first `):: - - $ maas node-group import-boot-images \ - - -You can also tell the cluster controller to re-import its boot images by -clicking the ``Import boot images`` button in the ``Clusters`` page of -the MAAS web UI. +Booting hardware-enablement kernels +----------------------------------- -Using hardware-enablement kernels in MAAS ------------------------------------------ +MAAS imports hardware-enablement kernels along with its generic boot images, +but as different "sub-architectures" to the default "generic" one. -A MAAS administrator can choose to use HWE kernels on a per-node basis -in MAAS. 
+So, for example, a common server might have architecture and sub-architecture +of ``amd64/generic``, but some newer system chassis which doesn't become +fully functional with the default kernel for Ubuntu 14.04 Trusty Tahr, for +example, may require ``amd64/hwe-t``. -The quickest way to do this is using the MAAS command, like this:: +The quickest way to make a node use a hardware-enablement kernel is by using +the MAAS command, like this:: $ maas node update architecture=amd64/hwe-t -If you specify an architecture that doesn't exist (e.g. -``amd64/hwe-z``), the ``maas`` command will return an error. - +If you specify an architecture that doesn't exist (e.g. ``amd64/hwe-zz``), +the ``maas`` command will return an error. It's also possible to use HWE kernels from the MAAS web UI, by visiting the Node's page and clicking ``Edit node``. Under the Architecture field, diff -Nru maas-1.5.4+bzr2294/docs/index.rst maas-1.7.6+bzr3376/docs/index.rst --- maas-1.5.4+bzr2294/docs/index.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/index.rst 2015-07-10 01:27:14.000000000 +0000 @@ -54,8 +54,12 @@ install configure cluster-configuration + static-ips + ipv6 + bootsources nodes hardware-enablement-kernels + sstreams-mirror networks kernel-options installing-ubuntu @@ -81,6 +85,7 @@ :maxdepth: 2 api + api_authentication maascli capabilities @@ -105,7 +110,6 @@ man/maas-region-admin.8 man/maas.8 - man/maas-import-pxe-files.8 *************** @@ -122,6 +126,7 @@ development/security development/building-packages development/cluster-registration + development/cluster-bootstrap development/tagging development/lease-scanning-and-dns development/preseeds diff -Nru maas-1.5.4+bzr2294/docs/installing-ubuntu.rst maas-1.7.6+bzr3376/docs/installing-ubuntu.rst --- maas-1.5.4+bzr2294/docs/installing-ubuntu.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/installing-ubuntu.rst 2015-07-10 01:27:14.000000000 +0000 @@ -16,36 +16,34 @@ There are two ways to 
install Ubuntu on a node: -1. :ref:`The default installer `. +1. :ref:`The Debian installer `. 2. :ref:`The fast installer `. .. _Juju: http://juju.ubuntu.com -.. _default-installer: +.. _debian-installer: -The default installer +The Debian Installer ---------------------- -The default installer installs Ubuntu on a node in exactly the same way +The Debian Installer installs Ubuntu on a node in exactly the same way as you would install it manually: using the `Debian Installer`_. -Installation is handled by the Debian installer. Answers to the -questions asked by the installer are provided in a 'preseed' file. For -more information on preseed files, see the :ref:`Additional +Answers to the questions asked by the installer are provided in a 'preseed' +file. For more information on preseed files, see the :ref:`Additional Configuration ` page. -As the name suggests, the default installer is enabled by default for -all new nodes. To enable the default installer for a node that's been -configured to use the fast installer, visit the node's page as an -administrator and click the ``Use the default installer`` button. +The Debian installer is not enabled by default. To enable it for a node that's +been configured to use the fast installer, visit the node's page as an +administrator and click the ``Use the Debian installer`` button. .. image:: media/node-page-use-default-installer.png -To set multiple nodes to use the default installer, select the ``Mark -nodes as using the default installer`` option from the bulk action menu +To set multiple nodes to use the Debian installer, select the ``Mark +nodes as using the Debian installer`` option from the bulk action menu on the ``Nodes`` page in the MAAS web UI. Because it installs Ubuntu from scratch, downloading packages as -required, the default installer is slower than the :ref:`fast installer +required, the Debian installer is slower than the :ref:`fast installer `. .. 
_Debian Installer: http://www.debian.org/devel/debian-installer/ @@ -57,22 +55,21 @@ The fast installer is, as the name suggests, a means of installing Ubuntu on a node more quickly than would be possible using the -:ref:`default installer `. +:ref:`Debian installer `. -To enable the fast installer for a node, visit the node's page as an -administrator and click the ``Use the fast installer`` button. +The fast installer is enabled by default for newly enlisted nodes. .. image:: media/node-page-use-fast-installer.png To set multiple nodes to use the fast installer, select the ``Mark nodes -as using the fase installer`` option from the bulk action menu on the +as using the fast installer`` option from the bulk action menu on the ``Nodes`` page in the MAAS web UI. The fast installer copies a pre-built Ubuntu image to the node, with all the packages installed that would be normally found in an Ubuntu installation. -The fast installer is much quicker than the default installer, but has +The fast installer is much quicker than the Debian installer, but has the disadvantage that it's less easy to configure a node at install time, since the fast installer doesn't use a :ref:`preseed file `. In addition, the packages that are initially installed on a diff -Nru maas-1.5.4+bzr2294/docs/install.rst maas-1.7.6+bzr3376/docs/install.rst --- maas-1.5.4+bzr2294/docs/install.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/install.rst 2015-07-10 01:27:14.000000000 +0000 @@ -4,7 +4,7 @@ Installing MAAS =============== -There are two main ways to install MAAS +There are two main ways to install MAAS: * :ref:`From Ubuntu's package archive on an existing Ubuntu install. ` @@ -12,7 +12,15 @@ media. ` If you are interested in testing the latest development version you -can also check out the very latest source and build MAAS yourself. 
+can also check out the very latest source and build MAAS — see +the :doc:`hacking guide ` — or use the `Testing PPA`_ or the +`Daily PPA`_. Note that these PPAs contain development versions of +MAAS that are potentially unstable, so use with caution. + +.. _Testing PPA: + https://launchpad.net/~maas-maintainers/+archive/ubuntu/testing +.. _Daily PPA: + https://launchpad.net/~maas-maintainers/+archive/ubuntu/dailybuilds .. _pkg-install: @@ -232,28 +240,29 @@ Import the boot images ---------------------- -MAAS will check for and download new Ubuntu images once a week. To avoid -having to wait that long, you'll need to download them manually the first time -once you have set up your MAAS region and cluster controllers. Do it again if -you add a cluster controller later, so that the new cluster controller also has -the images. - -There are two ways to start the import: through the web user interface, or -through the remote API. - -To do it in the web user interface, go to the Clusters tab and click the -"Import boot images" button at the bottom of the list of cluster -controllers. +Since version 1.7, MAAS stores the boot images in the region controller's +database, from where the cluster controllers will synchronise with the region +and pull images from the region to the cluster's local disk. This process +is automatic and MAAS will check for and download new Ubuntu images every hour. + +However, on a new installation you'll need to start the import process manually +once you have set up your MAAS region controller. There are two ways to start +the import: through the web user interface, or through the remote API. + +To do it in the web user interface, go to the Images tab, check the boxes to +say which images you want to import, and click the "Import images" button at +the bottom of the Ubuntu section. .. 
image:: media/import-images.* A message will appear to let you know that the import has started, and after a while, the warnings about the lack of boot images will disappear. -It may take longer for the exact boot images you need to be downloaded. Give -the import time to run; do not click the "Import boot images" button again -until the script has had time to download several hundred megabytes from the -archive server. +It may take a long time, depending on the speed of your Internet connection for +the import process to complete, as the images are several hundred megabytes. The +import process will only download images that have changed since the last import. +You can check the progress of the import by hovering over the spinner next to +each image. The other way to start the import is through the :ref:`region-controller API `, which you can invoke most @@ -263,16 +272,22 @@ See :ref:`Logging in ` for how to get set up with this tool. Then, run the command:: - $ maas my-maas-session node-groups import-boot-images + $ maas my-maas-session boot-resources import (Substitute a different profile name for 'my-maas-session' if you have named yours something else.) This will initiate the download, just as if you had -clicked "Import boot images" in the web user interface. +clicked "Import images" in the web user interface. -.. note:: - This API command is only available in MAAS versions 1.3 and above. - If you are using an earlier version, you will need to run the shell command - ``sudo maas-import-pxe-files`` on each of your cluster controllers. +By default, the import is configured to download the most recent LTS release +only for the amd64 architecture. Although this should suit most needs, you can +change the selections on the Images tab, or over the API. Read +:doc:`customise boot sources ` to see examples on how to do that. 
+ + +Speeding up repeated image imports by using a local mirror +---------------------------------------------------------- +See :doc:`sstreams-mirror` for information on how to set up a mirror and +configure MAAS to use it. Configure DHCP diff -Nru maas-1.5.4+bzr2294/docs/ipv6.rst maas-1.7.6+bzr3376/docs/ipv6.rst --- maas-1.5.4+bzr2294/docs/ipv6.rst 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/ipv6.rst 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,150 @@ +.. -*- mode: rst -*- + +.. _ipv6: + +Managing IPv6 Networks +====================== + +.. note:: + + This feature is available in MAAS versions 1.7 and above, starting with + lp:maas revision 2992. If you're writing a client application that makes use + of this feature, you can query the region-server API for the + ``ipv6-deployment-ubuntu`` :ref:`capability`. + +MAAS has limited IPv6 support for networking nodes. It works much like IPv4 +support, but with a number of limitations: + +* Nodes still boot, register, and install using the IPv4 network. +* IPv6 addresses are only configured when using the default Ubuntu installer. +* Most BMCs can only be controlled (e.g. to power nodes on/off) using IPv4. +* MAAS still uses IPv4 for its internal operation, installing nodes, etc. +* For now, MAAS only supports IPv6 on networks where it also manages IPv4 DHCP. +* A network interface on a node can only be on one IPv6 subnet. +* A network interface on a cluster controller can manage only one IPv6 subnet. + +The web user interface and REST API can be accessed in the same way on both +IPv4 and IPv6. To use an IPv6 address as the hostname in a URL, in your +browser or elsewhere, surround it with square brackets. For example, on the +local machine (``::1``, the IPv6 equivalent of ``localhost``) you might +request:: + + http://[::1]/MAAS/ + +If your MAAS server has a DNS hostname that resolves to both IPv4 and IPv6 +addresses, your browser may already be accessing the UI through IPv6 without +you noticing. 
+ + +Enabling IPv6 +------------- + +You enable IPv6 networking in the same way that you enable IPv4 networking: +configure a separate cluster interface for your IPv6 subnet, in addition to the +one you need for your IPv4 subnet. The IPv6 cluster interface must define a +static address range. Provided that you already have a functioning IPv6 +network, that's all there is to it. The following sections will go into more +detail about what is supported, what is needed, and what to do if you don't yet +have a functioning IPv6 network. + +An IPv6 cluster interface can use the same network interface on the cluster +controller as an existing IPv4 network interface. It just defines a different +subnet, with IPv6 addressing. A node that's connected to the IPv4 subnet will +also be connected to the IPv6 subnet on the same network segment. + + +Configuring your IPv6 subnet +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +When you configure your IPv6 cluster interface, be sure to define a static IP +address range. Deployed nodes on the subnet will get static addresses in this +range. + +IPv6 networks are normally 64 bits wide, so you can be generous with the +ranges' sizes. It also means that you can leave the netmask field blank. + +(There are no broadcast addresses in IPv6, so leave the broadcast address field +blank as well.) + +You may want MAAS to manage DHCP and DNS, but it's not required. In fact nodes +do not need a DHCP server at all for IPv6; MAAS configures static IPv6 +addresses on the node's network interfaces while deploying it. A DHCPv6 server +can provide addresses for containers or virtual machines running on the nodes, +as well as devices on the network that are not managed by MAAS, but it is not +needed for the nodes themselves. MAAS will not be aware of any addresses +issued by DHCP, and does not guarantee that they will stay unchanged. + + +.. _ipv6-routing: + +Routing +^^^^^^^ + +In IPv6, clients do not discover routes through DHCP. 
Routers make themselves +known on their networks by sending out *route advertisements*. These *RAs* +contain other configuration as well: whether clients should statelessly +configure their own unique IP addresses based on their MAC addresses; whether +they should request stateless configuration from a DHCP server; and finally, +whether they should request a stateful IP address from a DHCP server. Since a +network interface can have any number of IPv6 addresses even on a single +subnet, several of these address assignment mechanisms can be combined. + +However, when MAAS configures IPv6 networking on a node, it does not rely on +RAs. It statically configures your nodes' default IPv6 route to use the router +that is configured on the cluster interface, so that the nodes will know their +default gateway. They do not need DHCP and will not autoconfigure global +addresses. + +However, if you are planning to operate DHCPv6 clients as well, e.g. on +machines not managed by MAAS or on virtual machines hosted by MAAS nodes, you +may still want to have RAs configured to make those clients obtain +configuration over DHCP. + +If you need RAs but your gateway does not send them, install and configure +``radvd`` somewhere on the network to advertise its route. + + +Other installers and operating systems +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Static IPv6 addresses are currently only configured on Ubuntu, when installed +using the "fast" installer. Other operating systems, or Ubuntu with the +classic Debian installer, will not have their IPv6 addresses configured. +The same applies when a user manually installs an operating system on a node, +or overwrites its networking configuration: the node will no longer have its +static IPv6 address configured, even if MAAS has allocated it to the node. + +However, as long as the address remains allocated to the node, you may still +configure its operating system to use that address. 
The node can then use that +address as if it had been configured by MAAS. + + +Disabling IPv4 +-------------- + +For advanced users, there is an experimental capability to deploy nodes with +pure IPv6, with IPv4 networking disabled. To enable this on a node, check the +"Disable IPv4 when deployed" box on the node's Edit page. The process of +managing and deploying the node will still largely work through IPv4, but once +deployed, the node will have IPv6 networking only. + +In practice nodes may not be functional without IPv4 networking. A few things +are known to be needed in any case: + + +Configuring the MAAS URL +^^^^^^^^^^^^^^^^^^^^^^^^ + +The *maas-cluster-controller* package has a configuration item for the URL +where nodes and cluster controllers can reach the MAAS region API. + +By default, this URL is set based on the region controller's IPv4 address. To +make it work for nodes that won't have IP4, you must set the MAAS URL to use +a hostname instead of an IP address. The hostname must resolve to both IPv4 +and IPv6 addresses, and both on the cluster controller and on the nodes. + +To change this setting, run:: + + dpkg-reconfigure maas-cluster-controller + +It will prompt you for the URL, with its current setting as the initial value. diff -Nru maas-1.5.4+bzr2294/docs/juju-quick-start.rst maas-1.7.6+bzr3376/docs/juju-quick-start.rst --- maas-1.5.4+bzr2294/docs/juju-quick-start.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/juju-quick-start.rst 2015-07-10 01:27:14.000000000 +0000 @@ -94,7 +94,7 @@ Bootstrap:: - $ juju --sync-tools + $ juju sync-tools $ juju bootstrap If bootstrapping on a version of juju older than 1.14.0 then use:: diff -Nru maas-1.5.4+bzr2294/docs/maascli.rst maas-1.7.6+bzr3376/docs/maascli.rst --- maas-1.5.4+bzr2294/docs/maascli.rst 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/docs/maascli.rst 2015-07-10 01:27:14.000000000 +0000 @@ -38,9 +38,9 @@ command line. 
To obtain the key through the command line, run this command on the region -controller:: +controller (it requires root access):: - $ maas-region-admin apikey my-username + $ sudo maas-region-admin apikey my-username (Substitute your MAAS user name for my-username). @@ -324,7 +324,7 @@ node-groups ^^^^^^^^^^^ Usage: maas node-groups [-d --debug] [-h --help] [-k ---insecure] register | list | refresh-workers | accept | reject +--insecure] register | list | accept | reject .. program:: maas node-groups @@ -354,14 +354,6 @@ Returns a JSON list of all currently defined node groups. -:samp:`refresh_workers` - - It sounds a bit like they will get a cup of tea and a - biscuit. Actually this just sends each node-group worker an update - of its credentials (API key, node-group name). This command is - usually not needed at a user level, but is often used by worker - nodes. - :samp:`accept ` Accepts a node-group or number of nodegroups indicated by the @@ -475,7 +467,7 @@ :samp:`new {{ node_form.hostname }} -
Default is MAC-based, e.g. "node-aabbccddeeff" +
+ MAAS will generate an arbitrary name if you don't provide one.

- + + {{ node_form.osystem }} +

+

+ {{ node_form.distro_series }}

+

{{ node_form.nodegroup }} diff -Nru maas-1.5.4+bzr2294/src/maasserver/templates/metadataserver/nodecommissionresult.html maas-1.7.6+bzr3376/src/maasserver/templates/metadataserver/nodecommissionresult.html --- maas-1.5.4+bzr2294/src/maasserver/templates/metadataserver/nodecommissionresult.html 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/templates/metadataserver/nodecommissionresult.html 2015-07-10 01:27:14.000000000 +0000 @@ -5,6 +5,14 @@ {% block page-title %}Commissioning result for node: {{ object.node.hostname }}{% endblock %} {% block content %} +

  • Output file

    @@ -26,15 +34,32 @@

    Script return value

    {{ object.script_result }}
  • -
  • {% if object.data %}

    Output

    -
    -{{ object.get_data_as_html }}
    -
    + {% if object.name == "99-maas-02-capture-lldp.out" or object.name == "00-maas-01-lshw.out" %} +
    + +
    +
    +
    {{ object.get_data_as_yaml_html }}
    +
    +
    +
    {{ object.get_data_as_html }}
    +
    +
    +
    + {% else %} +
    +          {{ object.get_data_as_html }}
    +        
    + {% endif %} {% endif %}
- {% endblock %} + + diff -Nru maas-1.5.4+bzr2294/src/maasserver/templates/metadataserver/nodecommissionresult_list.html maas-1.7.6+bzr3376/src/maasserver/templates/metadataserver/nodecommissionresult_list.html --- maas-1.5.4+bzr2294/src/maasserver/templates/metadataserver/nodecommissionresult_list.html 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/templates/metadataserver/nodecommissionresult_list.html 2015-07-10 01:27:14.000000000 +0000 @@ -19,7 +19,7 @@ {% endif %} {% if paginator.count == 0 %} -

No matching commissioning results.

+

No matching commissioning results.

{% else %} diff -Nru maas-1.5.4+bzr2294/src/maasserver/templates/metadataserver/nodeinstallresult.html maas-1.7.6+bzr3376/src/maasserver/templates/metadataserver/nodeinstallresult.html --- maas-1.5.4+bzr2294/src/maasserver/templates/metadataserver/nodeinstallresult.html 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/templates/metadataserver/nodeinstallresult.html 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,35 @@ +{% extends "maasserver/base.html" %} + +{% block nav-active-settings %}active{% endblock %} +{% block title %}Installing result for node {{ object.node.hostname }}, script {{ object.name }}{% endblock %} +{% block page-title %}Installing result for node: {{ object.node.hostname }}{% endblock %} + +{% block content %} +
    +
  • +

    Output file

    + {{ object.name }} +
  • +
  • +

    Node

    + + + {{ object.node.hostname }} + + +
  • +
  • +

    Result registered at

    + {{ object.created }} +
  • +
  • + {% if object.data %} +

    Output

    +
    +{{ object.get_data_as_html }}
    +
    + {% endif %} +
  • +
+ +{% endblock %} diff -Nru maas-1.5.4+bzr2294/src/maasserver/templates/registration/login.html maas-1.7.6+bzr3376/src/maasserver/templates/registration/login.html --- maas-1.5.4+bzr2294/src/maasserver/templates/registration/login.html 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/templates/registration/login.html 2015-07-10 01:27:14.000000000 +0000 @@ -23,9 +23,9 @@ No users pictogram

No admin user has been created yet

- Use the "createsuperuser" administration command to create one: + Use the "createadmin" administration command to create one:

-
{{ create_command }} createsuperuser
+
{{ create_command }} createadmin
login {% else %}

Login to {% include "maasserver/site_title.html" %}

@@ -33,7 +33,12 @@

Your username and password didn't match. Please try again.

{% endif %} - {% csrf_token %} + {% comment %} + We turn off autocompletion of the login form in production + environments. Autocompletion, in combination with cross-site scripting + attacks, can potentially allow remote attackers to steal credentials. + {% endcomment %} + {% csrf_token %}
    {% for field in form %} diff -Nru maas-1.5.4+bzr2294/src/maasserver/testing/api.py maas-1.7.6+bzr3376/src/maasserver/testing/api.py --- maas-1.5.4+bzr2294/src/maasserver/testing/api.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/testing/api.py 2015-07-10 01:27:14.000000000 +0000 @@ -41,7 +41,7 @@ class ExampleTest(MultipleUsersScenarios, MAASServerTestCase): scenarios = [ ('anon', dict(userfactory=lambda: AnonymousUser())), - ('user', dict(userfactory=factory.make_user)), + ('user', dict(userfactory=factory.make_User)), ('admin', dict(userfactory=factory.make_admin)), ] @@ -62,7 +62,7 @@ super(MultipleUsersScenarios, self).setUp() user = self.userfactory() if not user.is_anonymous(): - password = factory.getRandomString() + password = factory.make_string() user.set_password(password) user.save() self.logged_in_user = user @@ -81,7 +81,7 @@ def setUp(self): super(APITestCase, self).setUp() - self.logged_in_user = factory.make_user( + self.logged_in_user = factory.make_User( username='test', password='test') self.client = OAuthAuthenticatedClient(self.logged_in_user) @@ -98,8 +98,8 @@ def log_in_as_normal_user(client): """Log `client` in as a normal user.""" - password = factory.getRandomString() - user = factory.make_user(password=password) + password = factory.make_string() + user = factory.make_User(password=password) client.login(username=user.username, password=password) return user diff -Nru maas-1.5.4+bzr2294/src/maasserver/testing/eventloop.py maas-1.7.6+bzr3376/src/maasserver/testing/eventloop.py --- maas-1.5.4+bzr2294/src/maasserver/testing/eventloop.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/testing/eventloop.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,103 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Testing utilities for the region event-loop.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "RegionEventLoopFixture", + "RunningEventLoopFixture", +] + +from crochet import wait_for_reactor +from fixtures import Fixture +from maasserver import eventloop +from maasserver.eventloop import loop +from twisted.application.service import Service + + +class RegionEventLoopFixture(Fixture): + """Stubs-out services in the event-loop to avoid side-effects. + + Sometimes we need only a single service, or no services, running + when starting the event-loop. This fixture, by default, will stub- + out all services by switching their factory callable out. This means + that the services will be created, started, and stopped, but they + won't do anything. + """ + + def __init__(self, *services): + super(RegionEventLoopFixture, self).__init__() + self.services = services + + def checkEventLoopClean(self): + # Don't proceed if the event-loop is running. + if loop.services.running: + raise RuntimeError( + "The event-loop has been left running; this fixture cannot " + "make a reasonable decision about what to do next.") + # Don't proceed if any services are registered. + services = list(loop.services) + if services != []: + raise RuntimeError( + "One or more services are registered; this fixture cannot " + "make a reasonable decision about what to do next. " + "The services are: %s." + % ', '.join(service.name for service in services)) + + def setUp(self): + super(RegionEventLoopFixture, self).setUp() + # Check that the event-loop is dormant and clean. + self.checkEventLoopClean() + # Ensure the event-loop will be left in a consistent state. + self.addCleanup(self.checkEventLoopClean) + # Restore the current `factories` tuple on exit. 
+ self.addCleanup(setattr, loop, "factories", loop.factories) + # Set the new `factories` tuple, with all factories stubbed-out + # except those in `self.services`. + loop.factories = tuple( + (name, (factory if name in self.services else Service)) + for name, factory in loop.factories) + + +class RunningEventLoopFixture(Fixture): + """Starts and stops the region's event-loop. + + Note that this does *not* start and stop the Twisted reactor. Typically in + region tests you'll find that the reactor is always running as a + side-effect of importing :py:mod:`maasserver.eventloop`. + """ + + @wait_for_reactor + def start(self): + return eventloop.start() + + @wait_for_reactor + def stop(self): + return eventloop.reset() + + def checkEventLoopClean(self): + # Don't proceed if the event-loop is running. + if loop.services.running: + raise RuntimeError( + "The event-loop has been left running; this fixture cannot " + "make a reasonable decision about what to do next.") + + def setUp(self): + super(RunningEventLoopFixture, self).setUp() + # Check that the event-loop is dormant and clean. + self.checkEventLoopClean() + # Check that the event-loop will be left dormant and clean. + self.addCleanup(self.checkEventLoopClean) + # Stop the event-loop on exit. + self.addCleanup(self.stop) + # Start the event-loop. 
+ self.start() diff -Nru maas-1.5.4+bzr2294/src/maasserver/testing/factory.py maas-1.7.6+bzr3376/src/maasserver/testing/factory.py --- maas-1.5.4+bzr2294/src/maasserver/testing/factory.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/testing/factory.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,48 +14,79 @@ __metaclass__ = type __all__ = [ "factory", + "Messages", ] +import hashlib from io import BytesIO +import logging import random import time from django.contrib.auth.models import User +from django.test.client import RequestFactory from maasserver.clusterrpc.power_parameters import get_power_types from maasserver.enum import ( + BOOT_RESOURCE_FILE_TYPE, + BOOT_RESOURCE_TYPE, + IPADDRESS_TYPE, + NODE_BOOT, NODE_STATUS, NODEGROUP_STATUS, NODEGROUPINTERFACE_MANAGEMENT, + POWER_STATE, + ) +from maasserver.fields import ( + LargeObjectFile, + MAC, ) -from maasserver.fields import MAC from maasserver.models import ( - BootImage, + BootResource, + BootResourceFile, + BootResourceSet, + BootSource, + BootSourceCache, + BootSourceSelection, DHCPLease, DownloadProgress, + Event, + EventType, FileStorage, + LargeFile, + LicenseKey, MACAddress, + MACStaticIPAddressLink, Network, Node, NodeGroup, NodeGroupInterface, SSHKey, + SSLKey, + StaticIPAddress, Tag, Zone, ) -from maasserver.models.node import NODE_TRANSITIONS -from maasserver.testing import ( - get_data, - reload_object, +from maasserver.models.bootresourceset import ( + COMMISSIONABLE_SET, + INSTALL_SET, + XINSTALL_TYPES, ) -from maasserver.utils import map_enum +from maasserver.node_status import NODE_TRANSITIONS +from maasserver.testing import get_data +from maasserver.testing.orm import reload_object import maastesting.factory from maastesting.factory import NO_VALUE +from metadataserver.enum import RESULT_TYPE from metadataserver.fields import Bin from metadataserver.models import ( CommissioningScript, - NodeCommissionResult, + NodeResult, + ) +from netaddr import ( + IPAddress, + 
IPRange, ) -from netaddr import IPAddress +from provisioningserver.utils.enum import map_enum # We have a limited number of public keys: # src/maasserver/tests/data/test_rsa{0, 1, 2, 3, 4}.pub @@ -65,8 +96,37 @@ ALL_NODE_STATES = map_enum(NODE_STATUS).values() +class Messages: + """A class to record messages published by Django messaging + framework. + """ + + def __init__(self): + self.messages = [] + + def add(self, level, message, extras): + self.messages.append((level, message, extras)) + + def __iter__(self): + for message in self.messages: + yield message + + class Factory(maastesting.factory.Factory): + def make_fake_request(self, path, method="GET"): + """Create a fake request. + + :param path: The path to which to make the request. + :param method: The method to use for the request + ('GET' or 'POST'). + """ + rf = RequestFactory() + request = rf.get(path) + request.method = method + request._messages = Messages() + return request + def make_file_upload(self, name=None, content=None): """Create a file-like object for upload in http POST or PUT. @@ -83,7 +143,7 @@ :return: A file-like object, with the requested `content` and `name`. """ if content is None: - content = self.getRandomString().encode('ascii') + content = self.make_string().encode('ascii') if name is None: name = self.make_name('file') assert isinstance(content, bytes) @@ -91,7 +151,7 @@ upload.name = name return upload - def getRandomEnum(self, enum, but_not=None): + def pick_enum(self, enum, but_not=None): """Pick a random item from an enumeration class. :param enum: An enumeration class such as `NODE_STATUS`. @@ -105,7 +165,7 @@ value for value in list(map_enum(enum).values()) if value not in but_not]) - def getRandomChoice(self, choices, but_not=None): + def pick_choice(self, choices, but_not=None): """Pick a random item from `choices`. 
:param choices: A sequence of choices in Django form choices format: @@ -122,7 +182,7 @@ return random.choice( [choice for choice in choices if choice[0] not in but_not])[0] - def getRandomPowerType(self, but_not=None): + def pick_power_type(self, but_not=None): """Pick a random power type and return it. :param but_not: Exclude these values from result @@ -137,6 +197,11 @@ [choice for choice in list(get_power_types().keys()) if choice not in but_not]) + def pick_commissioning_release(self, osystem): + """Pick a random commissioning release from operating system.""" + releases = osystem.get_supported_commissioning_releases() + return random.choice(releases) + def _save_node_unchecked(self, node): """Save a :class:`Node`, but circumvent status transition checks.""" valid_initial_states = NODE_TRANSITIONS[None] @@ -146,10 +211,11 @@ finally: NODE_TRANSITIONS[None] = valid_initial_states - def make_node(self, mac=False, hostname=None, status=None, + def make_Node(self, mac=False, hostname=None, status=None, architecture="i386/generic", updated=None, created=None, nodegroup=None, routers=None, zone=None, - power_type=None, networks=None, sortable_name=False, + power_type=None, networks=None, boot_type=None, + sortable_name=False, power_state=None, disable_ipv4=None, **kwargs): """Make a :class:`Node`. @@ -161,23 +227,31 @@ """ # hostname=None is a valid value, hence the set_hostname trick. 
if hostname is None: - hostname = self.getRandomString(20) + hostname = self.make_string(20) if sortable_name: hostname = hostname.lower() if status is None: status = NODE_STATUS.DEFAULT if nodegroup is None: - nodegroup = self.make_node_group() + nodegroup = self.make_NodeGroup() if routers is None: routers = [self.make_MAC()] if zone is None: - zone = self.make_zone() + zone = self.make_Zone() if power_type is None: power_type = 'ether_wake' + if power_state is None: + power_state = self.pick_enum(POWER_STATE) + if disable_ipv4 is None: + disable_ipv4 = self.pick_bool() + if boot_type is None: + boot_type = self.pick_enum(NODE_BOOT) node = Node( hostname=hostname, status=status, architecture=architecture, nodegroup=nodegroup, routers=routers, zone=zone, - power_type=power_type, **kwargs) + power_type=power_type, disable_ipv4=disable_ipv4, + power_state=power_state, boot_type=boot_type, + **kwargs) self._save_node_unchecked(node) # We do not generate random networks by default because the limited # number of VLAN identifiers (4,094) makes it very likely to @@ -185,7 +259,7 @@ if networks is not None: node.networks.add(*networks) if mac: - self.make_mac_address(node=node) + self.make_MACAddress(node=node) # Update the 'updated'/'created' fields with a call to 'update' # preventing a call to save() from overriding the values. @@ -195,167 +269,287 @@ Node.objects.filter(id=node.id).update(created=created) return reload_object(node) - def get_interface_fields(self, ip=None, router_ip=None, network=None, - subnet_mask=None, broadcast_ip=None, + def get_interface_fields(self, name=None, ip=None, router_ip=None, + network=None, subnet_mask=None, broadcast_ip=None, ip_range_low=None, ip_range_high=None, - interface=None, management=None, **kwargs): + interface=None, management=None, + static_ip_range_low=None, + static_ip_range_high=None, **kwargs): + """Return a dict of parameters for a cluster interface. 
+ + These are the values that go into a `NodeGroupInterface` model object + or form, except the `NodeGroup`. All IP address fields are unicode + strings. + + The `network` parameter is not included in the result, but if you + pass an `IPNetwork` as its value, this will be the network that the + cluster interface will be attached to. Its IP address, netmask, and + address ranges will be taken from `network`. + """ + if name is None: + name = factory.make_name('ngi') if network is None: - network = factory.getRandomNetwork() + network = factory.make_ipv4_network() + # Split the network into dynamic and static ranges. + if network.size > 2: + middle = network.size // 2 + dynamic_range = IPRange(network.first, network[middle]) + static_range = IPRange(network[middle + 1], network.last) + else: + dynamic_range = network + static_range = None if subnet_mask is None: subnet_mask = unicode(network.netmask) if broadcast_ip is None: broadcast_ip = unicode(network.broadcast) + if static_ip_range_low is None or static_ip_range_high is None: + if static_range is None: + static_ip_range_low = None + static_ip_range_high = None + else: + static_low = static_range.first + static_high = static_range.last + if static_ip_range_low is None: + static_ip_range_low = unicode(IPAddress(static_low)) + if static_ip_range_high is None: + static_ip_range_high = unicode(IPAddress(static_high)) if ip_range_low is None: - ip_range_low = unicode(IPAddress(network.first)) + ip_range_low = unicode(IPAddress(dynamic_range.first)) if ip_range_high is None: - ip_range_high = unicode(IPAddress(network.last)) + ip_range_high = unicode(IPAddress(dynamic_range.last)) if router_ip is None: - router_ip = factory.getRandomIPInNetwork(network) + router_ip = factory.pick_ip_in_network(network) if ip is None: - ip = factory.getRandomIPInNetwork(network) + ip = factory.pick_ip_in_network(network) if management is None: - management = factory.getRandomEnum(NODEGROUPINTERFACE_MANAGEMENT) + management = 
factory.pick_enum(NODEGROUPINTERFACE_MANAGEMENT) if interface is None: - interface = self.make_name('interface') + interface = self.make_name('netinterface') return dict( + name=name, subnet_mask=subnet_mask, broadcast_ip=broadcast_ip, ip_range_low=ip_range_low, ip_range_high=ip_range_high, + static_ip_range_low=static_ip_range_low, + static_ip_range_high=static_ip_range_high, router_ip=router_ip, ip=ip, management=management, interface=interface) - def make_node_group(self, name=None, uuid=None, cluster_name=None, - ip=None, router_ip=None, network=None, - subnet_mask=None, broadcast_ip=None, ip_range_low=None, - ip_range_high=None, interface=None, management=None, - status=None, maas_url='', **kwargs): + def make_NodeGroup(self, name=None, uuid=None, cluster_name=None, + dhcp_key=None, ip=None, router_ip=None, network=None, + subnet_mask=None, broadcast_ip=None, ip_range_low=None, + ip_range_high=None, interface=None, management=None, + status=None, maas_url='', static_ip_range_low=None, + static_ip_range_high=None, default_disable_ipv4=None, + **kwargs): """Create a :class:`NodeGroup`. + If `management` is set (to a `NODEGROUPINTERFACE_MANAGEMENT` value), + a :class:`NodeGroupInterface` will be created as well. + If network (an instance of IPNetwork) is provided, use it to populate subnet_mask, broadcast_ip, ip_range_low, ip_range_high, router_ip and - worker_ip. This is a convenience to setup a coherent network all in - one go. + worker_ip. This is a convenience for setting up a coherent network + all in one go. 
""" if status is None: - status = factory.getRandomEnum(NODEGROUP_STATUS) - if management is None: - management = NODEGROUPINTERFACE_MANAGEMENT.DHCP + status = factory.pick_enum(NODEGROUP_STATUS) if name is None: name = self.make_name('nodegroup') if uuid is None: - uuid = factory.getRandomUUID() + uuid = factory.make_UUID() if cluster_name is None: cluster_name = factory.make_name('cluster') - interface_settings = self.get_interface_fields( - ip=ip, router_ip=router_ip, network=network, - subnet_mask=subnet_mask, broadcast_ip=broadcast_ip, - ip_range_low=ip_range_low, ip_range_high=ip_range_high, - interface=interface, management=management) - interface_settings.update(kwargs) - return NodeGroup.objects.new( + if dhcp_key is None: + # TODO: Randomise this properly. + dhcp_key = '' + if default_disable_ipv4 is None: + default_disable_ipv4 = factory.pick_bool() + cluster = NodeGroup.objects.new( name=name, uuid=uuid, cluster_name=cluster_name, status=status, - maas_url=maas_url, **interface_settings) + dhcp_key=dhcp_key, maas_url=maas_url, + default_disable_ipv4=default_disable_ipv4) + if management is not None: + interface_settings = dict( + ip=ip, router_ip=router_ip, network=network, + subnet_mask=subnet_mask, broadcast_ip=broadcast_ip, + ip_range_low=ip_range_low, ip_range_high=ip_range_high, + interface=interface, management=management, + static_ip_range_low=static_ip_range_low, + static_ip_range_high=static_ip_range_high) + interface_settings.update(kwargs) + self.make_NodeGroupInterface(cluster, **interface_settings) + return cluster - def make_unrenamable_nodegroup_with_node(self): + def make_unrenamable_NodeGroup_with_Node(self): """Create a `NodeGroup` that can't be renamed, and `Node`. Node groups can't be renamed while they are in an accepted state, have DHCP and DNS management enabled, and have a node that is in allocated state. + The cluster will also have a managed interface. + :return: tuple: (`NodeGroup`, `Node`). 
""" name = self.make_name('original-name') - nodegroup = self.make_node_group( + nodegroup = self.make_NodeGroup( name=name, status=NODEGROUP_STATUS.ACCEPTED) - [interface] = nodegroup.get_managed_interfaces() - interface.management = NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS - interface.save() - node = self.make_node( + factory.make_NodeGroupInterface( + nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) + node = self.make_Node( nodegroup=nodegroup, status=NODE_STATUS.ALLOCATED) return nodegroup, node - def make_node_group_interface(self, nodegroup, ip=None, - router_ip=None, network=None, - subnet_mask=None, broadcast_ip=None, - ip_range_low=None, ip_range_high=None, - interface=None, management=None, **kwargs): + def make_NodeGroupInterface(self, nodegroup, name=None, ip=None, + router_ip=None, network=None, + subnet_mask=None, broadcast_ip=None, + ip_range_low=None, ip_range_high=None, + interface=None, management=None, + static_ip_range_low=None, + static_ip_range_high=None, **kwargs): interface_settings = self.get_interface_fields( - ip=ip, router_ip=router_ip, network=network, + name=name, ip=ip, router_ip=router_ip, network=network, subnet_mask=subnet_mask, broadcast_ip=broadcast_ip, ip_range_low=ip_range_low, ip_range_high=ip_range_high, - interface=interface, management=management) + interface=interface, management=management, + static_ip_range_low=static_ip_range_low, + static_ip_range_high=static_ip_range_high) interface_settings.update(**kwargs) interface = NodeGroupInterface( nodegroup=nodegroup, **interface_settings) interface.save() return interface - def make_node_commission_result(self, node=None, name=None, - script_result=None, data=None): + def make_NodeResult_for_commissioning( + self, node=None, name=None, script_result=None, data=None): + """Create a `NodeResult` as one would see from commissioning a node.""" + if node is None: + node = self.make_Node() + if name is None: + name = "ncrname-" + self.make_string(92) + if data 
is None: + data = b"ncrdata-" + self.make_bytes() + if script_result is None: + script_result = random.randint(0, 10) + ncr = NodeResult( + node=node, name=name, script_result=script_result, + result_type=RESULT_TYPE.COMMISSIONING, data=Bin(data)) + ncr.save() + return ncr + + def make_NodeResult_for_installation( + self, node=None, name=None, script_result=None, data=None): + """Create a `NodeResult` as one would see from installing a node.""" if node is None: - node = self.make_node() + node = self.make_Node() if name is None: - name = "ncrname-" + self.getRandomString(92) + name = "ncrname-" + self.make_string(92) if data is None: - data = b"ncrdata-" + self.getRandomBytes() + data = b"ncrdata-" + self.make_bytes() if script_result is None: script_result = random.randint(0, 10) - ncr = NodeCommissionResult( - node=node, name=name, script_result=script_result, data=Bin(data)) + ncr = NodeResult( + node=node, name=name, script_result=script_result, + result_type=RESULT_TYPE.INSTALLATION, data=Bin(data)) ncr.save() return ncr def make_MAC(self): """Generate a random MAC address, in the form of a MAC object.""" - return MAC(self.getRandomMACAddress()) + return MAC(self.make_mac_address()) - def make_mac_address(self, address=None, node=None, networks=None): - """Create a MACAddress model object.""" - if node is None: - node = self.make_node() + def make_MACAddress(self, address=None, node=None, networks=None, + **kwargs): + """Create a `MACAddress` model object.""" if address is None: - address = self.getRandomMACAddress() - mac = MACAddress(mac_address=MAC(address), node=node) + address = self.make_mac_address() + mac = MACAddress(mac_address=MAC(address), node=node, **kwargs) mac.save() if networks is not None: mac.networks.add(*networks) return mac - def make_dhcp_lease(self, nodegroup=None, ip=None, mac=None): + def make_MACAddress_with_Node(self, address=None, node=None, networks=None, + **kwargs): + """Create a `MACAddress` model that is guaranteed to be 
linked + to a node. + """ + if node is None: + node = self.make_Node() + return self.make_MACAddress( + node=node, address=address, networks=networks, **kwargs) + + def make_node_with_mac_attached_to_nodegroupinterface( + self, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, + network=None, disable_ipv4=False, **kwargs): + """Create a Node that has a MACAddress which has a + NodeGroupInterface. + + :param **kwargs: Additional parameters to pass to make_node. + """ + nodegroup = kwargs.pop("nodegroup", None) + if nodegroup is None: + nodegroup = self.make_NodeGroup() + node = self.make_Node( + mac=True, nodegroup=nodegroup, disable_ipv4=disable_ipv4, **kwargs) + ngi = self.make_NodeGroupInterface( + nodegroup, network=network, management=management) + mac = node.get_primary_mac() + mac.cluster_interface = ngi + mac.save() + return node + + def make_StaticIPAddress(self, ip=None, alloc_type=IPADDRESS_TYPE.AUTO, + mac=None, user=None): + """Create and return a StaticIPAddress model object. + + If a non-None `mac` is passed, connect this IP address to the + given MAC Address. 
+ """ + if ip is None: + ip = self.make_ipv4_address() + ipaddress = StaticIPAddress(ip=ip, alloc_type=alloc_type, user=user) + ipaddress.save() + if mac is not None: + MACStaticIPAddressLink( + mac_address=mac, ip_address=ipaddress).save() + return ipaddress + + def make_DHCPLease(self, nodegroup=None, ip=None, mac=None): """Create a :class:`DHCPLease`.""" if nodegroup is None: - nodegroup = self.make_node_group() + nodegroup = self.make_NodeGroup() if ip is None: - ip = self.getRandomIPAddress() + ip = self.make_ipv4_address() if mac is None: - mac = self.getRandomMACAddress() + mac = self.make_mac_address() lease = DHCPLease(nodegroup=nodegroup, ip=ip, mac=MAC(mac)) lease.save() return lease def make_email(self): - return '%s@example.com' % self.getRandomString(10) + return '%s@example.com' % self.make_string(10) - def make_user(self, username=None, password='test', email=None): + def make_User(self, username=None, password='test', email=None): if username is None: - username = self.getRandomUsername() + username = self.make_username() if email is None: email = self.make_email() return User.objects.create_user( username=username, password=password, email=email) - def make_sshkey(self, user, key_string=None): + def make_SSHKey(self, user, key_string=None): if key_string is None: key_string = get_data('data/test_rsa0.pub') key = SSHKey(key=key_string, user=user) key.save() return key - def make_tag(self, name=None, definition=None, comment='', + def make_Tag(self, name=None, definition=None, comment='', kernel_opts=None, created=None, updated=None): if name is None: name = self.make_name('tag') @@ -386,7 +580,7 @@ "add more keys in src/maasserver/tests/data/." 
% MAX_PUBLIC_KEYS) if user is None: - user = self.make_user(**kwargs) + user = self.make_User(**kwargs) keys = [] for i in range(n_keys): key_string = get_data('data/test_rsa%d.pub' % i) @@ -395,15 +589,38 @@ keys.append(key) return user, keys + def make_user_with_ssl_keys(self, n_keys=2, user=None, **kwargs): + """Create a user with n `SSLKey`. + + :param n_keys: Number of keys to add to user. + :param user: User to add keys to. If user is None, then user is made + with make_user. Additional keyword arguments are passed to + `make_user()`. + """ + if n_keys > MAX_PUBLIC_KEYS: + raise RuntimeError( + "Cannot create more than %d public keys. If you need more: " + "add more keys in src/maasserver/tests/data/." + % MAX_PUBLIC_KEYS) + if user is None: + user = self.make_User(**kwargs) + keys = [] + for i in range(n_keys): + key_string = get_data('data/test_x509_%d.pem' % i) + key = SSLKey(user=user, key=key_string) + key.save() + keys.append(key) + return user, keys + def make_admin(self, username=None, password='test', email=None): if username is None: - username = self.getRandomUsername() + username = self.make_username() if email is None: email = self.make_email() return User.objects.create_superuser( username, password=password, email=email) - def make_file_storage(self, filename=None, content=None, owner=None): + def make_FileStorage(self, filename=None, content=None, owner=None): fake_file = self.make_file_upload(filename, content) return FileStorage.objects.save_file(fake_file.name, fake_file, owner) @@ -414,72 +631,49 @@ header items that you wish to override. 
""" items = { - 'realm': self.getRandomString(), + 'realm': self.make_string(), 'oauth_nonce': random.randint(0, 99999), 'oauth_timestamp': time.time(), - 'oauth_consumer_key': self.getRandomString(18), + 'oauth_consumer_key': self.make_string(18), 'oauth_signature_method': 'PLAINTEXT', 'oauth_version': '1.0', - 'oauth_token': self.getRandomString(18), - 'oauth_signature': "%%26%s" % self.getRandomString(32), + 'oauth_token': self.make_string(18), + 'oauth_signature': "%%26%s" % self.make_string(32), } items.update(kwargs) return "OAuth " + ", ".join([ '%s="%s"' % (key, value) for key, value in items.items()]) - def make_boot_image(self, architecture=None, subarchitecture=None, - release=None, purpose=None, nodegroup=None, - label=None): - if architecture is None: - architecture = self.make_name('architecture') - if subarchitecture is None: - subarchitecture = self.make_name('subarchitecture') - if release is None: - release = self.make_name('release') - if purpose is None: - purpose = self.make_name('purpose') - if nodegroup is None: - nodegroup = self.make_node_group() - if label is None: - label = self.make_name('label') - return BootImage.objects.create( - nodegroup=nodegroup, - architecture=architecture, - subarchitecture=subarchitecture, - release=release, - purpose=purpose, - label=label) - - def make_commissioning_script(self, name=None, content=None): + def make_CommissioningScript(self, name=None, content=None): if name is None: name = self.make_name('script') if content is None: - content = b'content:' + self.getRandomString().encode('ascii') + content = b'content:' + self.make_string().encode('ascii') return CommissioningScript.objects.create( name=name, content=Bin(content)) - def make_download_progress(self, nodegroup=None, filename=None, - size=NO_VALUE, bytes_downloaded=NO_VALUE, - error=None): + def make_DownloadProgress(self, nodegroup=None, filename=None, + size=NO_VALUE, bytes_downloaded=NO_VALUE, + error=None): """Create a `DownloadProgress` in 
some poorly-defined state. If you have specific wishes about the object's state, you'll want to - use one of the specialized `make_download_progress_*` methods instead. + use one of the specialized `make_DownloadProgress_*` methods instead. Pass a `size` of `None` to indicate that total file size is not yet known. The default picks either a random number, or None. """ if nodegroup is None: - nodegroup = self.make_node_group() + nodegroup = self.make_NodeGroup() if filename is None: filename = self.make_name('download') if size is NO_VALUE: - if self.getRandomBoolean(): + if self.pick_bool(): size = random.randint(0, 1000000000) else: size = None if bytes_downloaded is NO_VALUE: - if self.getRandomBoolean(): + if self.pick_bool(): if size is None: max_size = 1000000000 else: @@ -488,36 +682,36 @@ else: bytes_downloaded = None if error is None: - if self.getRandomBoolean(): - error = self.getRandomString() + if self.pick_bool(): + error = self.make_string() else: error = '' return DownloadProgress.objects.create( nodegroup=nodegroup, filename=filename, size=size, bytes_downloaded=bytes_downloaded) - def make_download_progress_initial(self, nodegroup=None, filename=None, - size=NO_VALUE): + def make_DownloadProgress_initial(self, nodegroup=None, filename=None, + size=NO_VALUE): """Create a `DownloadProgress` as reported before a download.""" - return self.make_download_progress( + return self.make_DownloadProgress( nodegroup=nodegroup, filename=filename, size=size, bytes_downloaded=None, error='') - def make_download_progress_success(self, nodegroup=None, filename=None, - size=None): + def make_DownloadProgress_success(self, nodegroup=None, filename=None, + size=None): """Create a `DownloadProgress` indicating success.""" if size is None: size = random.randint(0, 1000000000) - return self.make_download_progress( + return self.make_DownloadProgress( nodegroup=nodegroup, filename=filename, size=size, bytes_downloaded=size, error='') - def 
make_download_progress_incomplete(self, nodegroup=None, filename=None, - size=NO_VALUE, - bytes_downloaded=None): + def make_DownloadProgress_incomplete(self, nodegroup=None, filename=None, + size=NO_VALUE, + bytes_downloaded=None): """Create a `DownloadProgress` that's not done yet.""" if size is NO_VALUE: - if self.getRandomBoolean(): + if self.pick_bool(): # File can't be empty, or the download can't be incomplete. size = random.randint(1, 1000000000) else: @@ -528,21 +722,21 @@ else: max_size = size bytes_downloaded = random.randint(0, max_size - 1) - return self.make_download_progress( + return self.make_DownloadProgress( nodegroup=nodegroup, filename=filename, size=size, bytes_downloaded=bytes_downloaded, error='') - def make_download_progress_failure(self, nodegroup=None, filename=None, - size=NO_VALUE, - bytes_downloaded=NO_VALUE, error=None): + def make_DownloadProgress_failure(self, nodegroup=None, filename=None, + size=NO_VALUE, + bytes_downloaded=NO_VALUE, error=None): """Create a `DownloadProgress` indicating failure.""" if error is None: - error = self.getRandomString() - return self.make_download_progress_incomplete( + error = self.make_string() + return self.make_DownloadProgress_incomplete( nodegroup=nodegroup, filename=filename, size=size, bytes_downloaded=bytes_downloaded, error=error) - def make_zone(self, name=None, description=None, nodes=None, + def make_Zone(self, name=None, description=None, nodes=None, sortable_name=False): """Create a physical `Zone`. @@ -557,38 +751,19 @@ if sortable_name: name = name.lower() if description is None: - description = self.getRandomString() + description = self.make_string() zone = Zone(name=name, description=description) zone.save() if nodes is not None: zone.node_set.add(*nodes) return zone - def make_vlan_tag(self, allow_none=False, but_not=None): - """Create a random VLAN tag. + make_zone = make_Zone - :param allow_none: Whether `None` ("no VLAN") can be allowed as an - outcome. 
If `True`, `None` will be included in the possible - results with a deliberately over-represented probability, in order - to help trip up bugs that might only show up once in about 4094 - calls otherwise. - :param but_not: A list of tags that should not be returned. Any zero - or `None` entries will be ignored. - """ - if but_not is None: - but_not = [] - if allow_none and random.randint(0, 1) == 0: - return None - else: - for _ in range(100): - vlan_tag = random.randint(1, 0xffe) - if vlan_tag not in but_not: - return vlan_tag - raise maastesting.factory.TooManyRandomRetries( - "Could not find an available VLAN tag.") - - def make_network(self, name=None, network=None, vlan_tag=NO_VALUE, - description=None, sortable_name=False): + def make_Network(self, name=None, network=None, vlan_tag=NO_VALUE, + description=None, sortable_name=False, + disjoint_from=None, default_gateway=None, + dns_servers=None): """Create a `Network`. :param network: An `IPNetwork`. If given, the `ip` and `netmask` @@ -601,6 +776,8 @@ by name, where the database and the python code may have different ideas about collation orders, especially when it comes to case differences. + :param disjoint_from: List of other `Network` or `IPNetwork` objects + whose IP ranges the new network must not overlap with. """ if name is None: name = factory.make_name() @@ -609,21 +786,36 @@ # case-sensitive and case-insensitive ordering, so use lower-case # only. name = name.lower() + if disjoint_from is None: + disjoint_from = [] + # disjoint_from may contain both Network and IPNetwork. Normalise to + # all IPNetwork objects. 
+ disjoint_from = [ + entry.get_network() if isinstance(entry, Network) else entry + for entry in disjoint_from + ] if network is None: - network = self.getRandomNetwork() + network = self.make_ipv4_network(disjoint_from=disjoint_from) + if default_gateway is None and self.pick_bool(): + default_gateway = self.pick_ip_in_network(network) + if dns_servers is None and self.pick_bool(): + dns_servers = " ".join( + self.make_ipv4_address() + for _ in range(random.choice((1, 2)))) ip = unicode(network.ip) netmask = unicode(network.netmask) if description is None: - description = self.getRandomString() + description = self.make_string() if vlan_tag is NO_VALUE: vlan_tag = self.make_vlan_tag() network = Network( name=name, ip=ip, netmask=netmask, vlan_tag=vlan_tag, - description=description) + description=description, default_gateway=default_gateway, + dns_servers=dns_servers) network.save() return network - def make_networks(self, number, with_vlans=True, **kwargs): + def make_Networks(self, number, with_vlans=True, **kwargs): """Create multiple networks. This avoids accidentally clashing VLAN tags. @@ -644,10 +836,209 @@ "Could not generate %d non-clashing VLAN tags" % number) else: vlan_tags = [None] * number - return [ - self.make_network(vlan_tag=vlan_tag, **kwargs) - for vlan_tag in vlan_tags - ] + networks = [] + for tag in vlan_tags: + networks.append( + self.make_Network( + vlan_tag=tag, disjoint_from=networks, **kwargs)) + return networks + + def make_BootSource(self, url=None, keyring_filename=None, + keyring_data=None): + """Create a new `BootSource`.""" + if url is None: + url = "http://%s.com" % self.make_name('source-url') + # Only set _one_ of keyring_filename and keyring_data. 
+ if keyring_filename is None and keyring_data is None: + keyring_filename = self.make_name("keyring") + boot_source = BootSource( + url=url, + keyring_filename=( + "" if keyring_filename is None else keyring_filename), + keyring_data=( + b"" if keyring_data is None else keyring_data), + ) + boot_source.save() + return boot_source + + def make_BootSourceCache(self, boot_source=None, os=None, arch=None, + subarch=None, release=None, label=None): + """Create a new `BootSourceCache`.""" + if boot_source is None: + boot_source = self.make_BootSource() + if os is None: + os = factory.make_name('os') + if arch is None: + arch = factory.make_name('arch') + if subarch is None: + subarch = factory.make_name('subarch') + if release is None: + release = factory.make_name('release') + if label is None: + label = factory.make_name('label') + return BootSourceCache.objects.create( + boot_source=boot_source, os=os, arch=arch, + subarch=subarch, release=release, label=label) + + def make_many_BootSourceCaches(self, number, **kwargs): + caches = list() + for _ in range(number): + caches.append(self.make_BootSourceCache(**kwargs)) + return caches + + def make_BootSourceSelection(self, boot_source=None, os=None, + release=None, arches=None, subarches=None, + labels=None): + """Create a `BootSourceSelection`.""" + if boot_source is None: + boot_source = self.make_BootSource() + if os is None: + os = self.make_name('os') + if release is None: + release = self.make_name('release') + if arches is None: + arch_count = random.randint(1, 10) + arches = [self.make_name("arch") for _ in range(arch_count)] + if subarches is None: + subarch_count = random.randint(1, 10) + subarches = [ + self.make_name("subarch") + for _ in range(subarch_count) + ] + if labels is None: + label_count = random.randint(1, 10) + labels = [self.make_name("label") for _ in range(label_count)] + boot_source_selection = BootSourceSelection( + boot_source=boot_source, release=release, arches=arches, + 
subarches=subarches, labels=labels) + boot_source_selection.save() + return boot_source_selection + + def make_LicenseKey(self, osystem=None, distro_series=None, + license_key=None): + if osystem is None: + osystem = factory.make_name('osystem') + if distro_series is None: + distro_series = factory.make_name('distro_series') + if license_key is None: + license_key = factory.make_name('key') + return LicenseKey.objects.create( + osystem=osystem, + distro_series=distro_series, + license_key=license_key) + + def make_EventType(self, name=None, level=None, description=None): + if name is None: + name = self.make_name('name', size=20) + if description is None: + description = factory.make_name('description') + if level is None: + level = random.choice([ + logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]) + return EventType.objects.create( + name=name, description=description, level=level) + + def make_Event(self, node=None, type=None, description=None): + if node is None: + node = self.make_Node() + if type is None: + type = self.make_EventType() + if description is None: + description = self.make_name('desc') + return Event.objects.create( + node=node, type=type, description=description) + + def make_LargeFile(self, content=None, size=512): + """Create `LargeFile`. + + :param content: Data to store in large file object. + :param size: Size of `content`. If `content` is None + then it will be a random string of this size. If content is + provided and `size` is not the same length, then it will + be an inprogress file. 
+ """ + if content is None: + content = factory.make_string(size=size) + sha256 = hashlib.sha256() + sha256.update(content) + sha256 = sha256.hexdigest() + largeobject = LargeObjectFile() + with largeobject.open('wb') as stream: + stream.write(content) + return LargeFile.objects.create( + sha256=sha256, total_size=size, content=largeobject) + + def make_BootResource(self, rtype=None, name=None, architecture=None, + extra=None): + if rtype is None: + rtype = self.pick_enum(BOOT_RESOURCE_TYPE) + if name is None: + if rtype == BOOT_RESOURCE_TYPE.UPLOADED: + name = self.make_name('name') + else: + os = self.make_name('os') + series = self.make_name('series') + name = '%s/%s' % (os, series) + if architecture is None: + arch = self.make_name('arch') + subarch = self.make_name('subarch') + architecture = '%s/%s' % (arch, subarch) + if extra is None: + extra = { + self.make_name('key'): self.make_name('value') + for _ in range(3) + } + return BootResource.objects.create( + rtype=rtype, name=name, architecture=architecture, extra=extra) + + def make_BootResourceSet(self, resource, version=None, label=None): + if version is None: + version = self.make_name('version') + if label is None: + label = self.make_name('label') + return BootResourceSet.objects.create( + resource=resource, version=version, label=label) + + def make_BootResourceFile(self, resource_set, largefile, filename=None, + filetype=None, extra=None): + if filename is None: + filename = self.make_name('name') + if filetype is None: + filetype = self.pick_enum(BOOT_RESOURCE_FILE_TYPE) + if extra is None: + extra = { + self.make_name('key'): self.make_name('value') + for _ in range(3) + } + return BootResourceFile.objects.create( + resource_set=resource_set, largefile=largefile, filename=filename, + filetype=filetype, extra=extra) + + def make_boot_resource_file_with_content( + self, resource_set, filename=None, filetype=None, extra=None, + content=None, size=512): + largefile = 
self.make_LargeFile(content=content, size=size) + return self.make_BootResourceFile( + resource_set, largefile, filename=filename, filetype=filetype, + extra=extra) + + def make_usable_boot_resource( + self, rtype=None, name=None, architecture=None, + extra=None, version=None, label=None): + resource = self.make_BootResource( + rtype=rtype, name=name, architecture=architecture, extra=extra) + resource_set = self.make_BootResourceSet( + resource, version=version, label=label) + filetypes = COMMISSIONABLE_SET.union(INSTALL_SET) + filetypes.add(random.choice(XINSTALL_TYPES)) + for filetype in filetypes: + # We set the filename to the same value as filetype, as in most + # cases this will always be true. The simplestreams content from + # maas.ubuntu.com, is formatted this way. + self.make_boot_resource_file_with_content( + resource_set, filename=filetype, filetype=filetype) + return resource + # Create factory singleton. factory = Factory() diff -Nru maas-1.5.4+bzr2294/src/maasserver/testing/__init__.py maas-1.7.6+bzr3376/src/maasserver/testing/__init__.py --- maas-1.5.4+bzr2294/src/maasserver/testing/__init__.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/testing/__init__.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2013 Canonical Ltd. This software is licensed under the +# Copyright 2013-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). from __future__ import ( @@ -18,8 +18,6 @@ "get_data", "get_prefixed_form_data", "NoReceivers", - "reload_object", - "reload_objects", ] import collections @@ -30,7 +28,6 @@ from urlparse import urlparse from lxml.html import fromstring -from maasserver.utils.orm import get_one def extract_redirect(http_response): @@ -56,49 +53,19 @@ return parsed_url.path -def reload_object(model_object): - """Reload `obj` from the database. 
- - Use this when a test needs to inspect changes to model objects made by - the API. - - If the object has been deleted, this will return None. - - :param model_object: Model object to reload. - :type model_object: Concrete `Model` subtype. - :return: Freshly-loaded instance of `model_object`, or None. - :rtype: Same as `model_object`. - """ - model_class = model_object.__class__ - return get_one(model_class.objects.filter(id=model_object.id)) - - -def reload_objects(model_class, model_objects): - """Reload `model_objects` of type `model_class` from the database. +def get_data(filename): + """Read the content of a file in `src/maasserver/tests`. - Use this when a test needs to inspect changes to model objects made by - the API. + Some tests use this to read fixed data stored in files in + `src/maasserver/tests/data/`. - If any of the objects have been deleted, they will not be included in - the result. + Where possible, provide data in-line in tests, or use fakes, to keep the + information close to the tests that rely on it. - :param model_class: `Model` class to reload from. - :type model_class: Class. - :param model_objects: Objects to reload from the database. - :type model_objects: Sequence of `model_class` objects. - :return: Reloaded objects, in no particular order. - :rtype: Sequence of `model_class` objects. + :param filename: A file path relative to `src/maasserver/tests` in + this branch. + :return: Binary contents of the file, as `bytes`. """ - assert all(isinstance(obj, model_class) for obj in model_objects) - return model_class.objects.filter( - id__in=[obj.id for obj in model_objects]) - - -def get_data(filename): - """Utility method to read the content of files in - src/maasserver/tests. 
- - Usually used to read files in src/maasserver/tests/data.""" path = os.path.join( os.path.dirname(os.path.abspath(__file__)), '..', 'tests', filename) return file(path).read() diff -Nru maas-1.5.4+bzr2294/src/maasserver/testing/orm.py maas-1.7.6+bzr3376/src/maasserver/testing/orm.py --- maas-1.5.4+bzr2294/src/maasserver/testing/orm.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/testing/orm.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,58 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""ORM-related test helpers.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'reload_object', + 'reload_objects', + ] + +from maasserver.utils.orm import get_one + + +def reload_object(model_object): + """Reload `obj` from the database. + + Use this when a test needs to inspect changes to model objects made by + the API. + + If the object has been deleted, this will return None. + + :param model_object: Model object to reload. + :type model_object: Concrete `Model` subtype. + :return: Freshly-loaded instance of `model_object`, or None. + :rtype: Same as `model_object`. + """ + model_class = model_object.__class__ + return get_one(model_class.objects.filter(id=model_object.id)) + + +def reload_objects(model_class, model_objects): + """Reload `model_objects` of type `model_class` from the database. + + Use this when a test needs to inspect changes to model objects made by + the API. + + If any of the objects have been deleted, they will not be included in + the result. + + :param model_class: `Model` class to reload from. + :type model_class: Class. + :param model_objects: Objects to reload from the database. + :type model_objects: Sequence of `model_class` objects. + :return: Reloaded objects, in no particular order. 
+ :rtype: Sequence of `model_class` objects. + """ + assert all(isinstance(obj, model_class) for obj in model_objects) + return model_class.objects.filter( + id__in=[obj.id for obj in model_objects]) diff -Nru maas-1.5.4+bzr2294/src/maasserver/testing/osystems.py maas-1.7.6+bzr3376/src/maasserver/testing/osystems.py --- maas-1.5.4+bzr2294/src/maasserver/testing/osystems.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/testing/osystems.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,87 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Helpers for operating systems in testing.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'make_usable_osystem', + 'patch_usable_osystems', + ] + +from random import randint + +from maasserver.clusterrpc.testing.osystems import ( + make_rpc_osystem, + make_rpc_release, + ) +from maasserver.testing.factory import factory +from maasserver.utils import osystems as osystems_module + + +def make_osystem_with_releases(testcase, osystem_name=None, releases=None): + """Generate an arbitrary operating system. + + :param osystem_name: The operating system name. Useful in cases where + we need to test that not supplying an os works correctly. + :param releases: The list of releases name. Useful in cases where + we need to test that not supplying a release works correctly. + """ + if osystem_name is None: + osystem_name = factory.make_name('os') + if releases is None: + releases = [factory.make_name('release') for _ in range(3)] + rpc_releases = [ + make_rpc_release(release) + for release in releases + ] + return make_rpc_osystem(osystem_name, releases=rpc_releases) + + +def patch_usable_osystems(testcase, osystems=None, allow_empty=True): + """Set a fixed list of usable operating systems. 
+ + A usable operating system is one for which boot images are available. + + :param testcase: A `TestCase` whose `patch` this function can use. + :param osystems: Optional list of operating systems. If omitted, + defaults to a list (which may be empty) of random operating systems. + """ + start = 0 + if allow_empty is False: + start = 1 + if osystems is None: + osystems = [ + make_osystem_with_releases(testcase) + for _ in range(randint(start, 2)) + ] + testcase.patch( + osystems_module, + 'gen_all_known_operating_systems').return_value = osystems + + +def make_usable_osystem(testcase, osystem_name=None, releases=None): + """Return arbitrary operating system, and make it "usable." + + A usable operating system is one that is returned from the + RPC call ListOperatingSystems. + + :param testcase: A `TestCase` whose `patch` this function can pass to + `patch_usable_osystems`. + :param osystem_name: The operating system name. Useful in cases where + we need to test that not supplying an os works correctly. + :param releases: The list of releases name. Useful in cases where + we need to test that not supplying a release works correctly. + """ + osystem = make_osystem_with_releases( + testcase, osystem_name=osystem_name, releases=releases) + patch_usable_osystems(testcase, [osystem]) + return osystem diff -Nru maas-1.5.4+bzr2294/src/maasserver/testing/rabbit.py maas-1.7.6+bzr3376/src/maasserver/testing/rabbit.py --- maas-1.5.4+bzr2294/src/maasserver/testing/rabbit.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/testing/rabbit.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,70 +0,0 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Helpers for testing with RabbitMQ.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - "RabbitServerSettings", - "use_rabbit_fixture", - "uses_rabbit_fixture", - ] - -from functools import wraps - -from fixtures import Fixture -from rabbitfixture.server import RabbitServer -from testtools.monkey import MonkeyPatcher - - -class RabbitServerSettings(Fixture): - """ - This patches the active Django settings to point the application at the - ephemeral RabbitMQ server specified by the given configuration. - """ - - def __init__(self, config): - super(RabbitServerSettings, self).__init__() - self.config = config - - def setUp(self): - super(RabbitServerSettings, self).setUp() - from django.conf import settings - patcher = MonkeyPatcher() - patcher.add_patch( - settings, "RABBITMQ_HOST", - "%s:%d" % (self.config.hostname, self.config.port)) - patcher.add_patch(settings, "RABBITMQ_USERID", "guest") - patcher.add_patch(settings, "RABBITMQ_PASSWORD", "guest") - patcher.add_patch(settings, "RABBITMQ_VIRTUAL_HOST", "/") - patcher.add_patch(settings, "RABBITMQ_PUBLISH", True) - self.addCleanup(patcher.restore) - patcher.patch() - - -def use_rabbit_fixture(test): - """Ensure that a :class:`RabbitServer` is started, and Django's setting - updated to point to it, and that Django's settings are returned to their - original values at the end. 
- """ - test.rabbit = RabbitServer() - test.useFixture(test.rabbit) - settings = RabbitServerSettings(test.rabbit.config) - test.useFixture(settings) - - -def uses_rabbit_fixture(func): - """Decorate a test function with `use_rabbit_fixture`.""" - @wraps(func) - def wrapper(self): - use_rabbit_fixture(self) - return func(self) - return wrapper diff -Nru maas-1.5.4+bzr2294/src/maasserver/testing/testcase.py maas-1.7.6+bzr3376/src/maasserver/testing/testcase.py --- maas-1.5.4+bzr2294/src/maasserver/testing/testcase.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/testing/testcase.py 2015-07-10 01:27:14.000000000 +0000 @@ -25,7 +25,6 @@ import crochet import django -from django.core.cache import cache as django_cache from django.core.urlresolvers import reverse from django.db import connection from django.test.client import encode_multipart @@ -33,18 +32,14 @@ from maasserver.clusterrpc import power_parameters from maasserver.fields import register_mac_type from maasserver.testing.factory import factory -from maastesting.celery import CeleryFixture from maastesting.djangotestcase import ( - cleanup_db, DjangoTestCase, + TransactionTestCase, ) from maastesting.fixtures import DisplayFixture -from maastesting.testcase import MAASTestCase from maastesting.utils import run_isolated from mock import Mock import provisioningserver -from provisioningserver.testing.tags import TagCachedKnowledgeFixture -from provisioningserver.testing.worker_cache import WorkerCacheFixture MIME_BOUNDARY = 'BoUnDaRyStRiNg' @@ -63,10 +58,6 @@ def setUp(self): super(MAASServerTestCase, self).setUp() - self.useFixture(WorkerCacheFixture()) - self.useFixture(TagCachedKnowledgeFixture()) - self.addCleanup(django_cache.clear) - self.celery = self.useFixture(CeleryFixture()) # This patch prevents communication with a non-existent cluster # controller when fetching power types. 
static_params = ( @@ -74,6 +65,15 @@ self.patch( power_parameters, 'get_all_power_types_from_clusters').return_value = static_params + # Disconnect the monitor cancellation as it's triggered by a signal. + # Avoid circular imports. + from maasserver import monitor_connect + self.patch(monitor_connect, 'MONITOR_CANCEL_CONNECT', False) + + # Disconnect the status transition event to speed up tests. + # Avoid circular imports. + from maasserver import event_connect + self.patch(event_connect, 'STATE_TRANSITION_EVENT_CONNECT', False) def client_log_in(self, as_admin=False): """Log `self.client` into MAAS. @@ -84,7 +84,7 @@ if as_admin: user = factory.make_admin(password=password) else: - user = factory.make_user(password=password) + user = factory.make_User(password=password) self.client.login(username=user.username, password=password) self.logged_in_user = user @@ -139,7 +139,7 @@ wsgiref.handlers.BaseHandler.log_exception = self.old_log_exception -class SeleniumTestCase(MAASTestCase, LiveServerTestCase): +class SeleniumTestCase(TransactionTestCase, LiveServerTestCase): """Selenium-enabled test case. Two users are pre-created: "user" for a regular user account, or "admin" @@ -169,11 +169,6 @@ "Live tests only enabled if Django.version >=1.4.") super(SeleniumTestCase, self).setUp() - def tearDown(self): - super(SeleniumTestCase, self).tearDown() - cleanup_db(self) - django_cache.clear() - @classmethod def tearDownClass(cls): if not django_supports_selenium: diff -Nru maas-1.5.4+bzr2294/src/maasserver/testing/tests/test_db_migrations.py maas-1.7.6+bzr3376/src/maasserver/testing/tests/test_db_migrations.py --- maas-1.5.4+bzr2294/src/maasserver/testing/tests/test_db_migrations.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/testing/tests/test_db_migrations.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. 
This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for helpers used to sanity-check South migrations.""" @@ -26,7 +26,7 @@ if number is None: number = randint(0, 9999) if name is None: - name = factory.getRandomString() + name = factory.make_string() return '{0:=04}_{1}'.format(number, name) diff -Nru maas-1.5.4+bzr2294/src/maasserver/testing/tests/test_factory.py maas-1.7.6+bzr3376/src/maasserver/testing/tests/test_factory.py --- maas-1.5.4+bzr2294/src/maasserver/testing/tests/test_factory.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/testing/tests/test_factory.py 2015-07-10 01:27:14.000000000 +0000 @@ -20,47 +20,25 @@ Network, NodeGroup, ) -from maasserver.testing import reload_object from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maastesting.factory import TooManyRandomRetries - - -class FakeRandInt: - """Fake `randint` which forced limitations on its range. - - This lets you set a forced minimum, and/or a forced maximum, on the range - of any call. For example, if you pass `forced_maximum=3`, then a call - will never return more than 3. If you don't set a maximum, or if the - call's maximum argument is less than the forced maximum, then the call's - maximum will be respected. 
- """ - def __init__(self, real_randint, forced_minimum=None, forced_maximum=None): - self.real_randint = real_randint - self.minimum = forced_minimum - self.maximum = forced_maximum - - def __call__(self, minimum, maximum): - if self.minimum is not None: - minimum = max(minimum, self.minimum) - if self.maximum is not None: - maximum = min(maximum, self.maximum) - return self.real_randint(minimum, maximum) +from maastesting.utils import FakeRandInt class TestFactory(MAASServerTestCase): - def test_getRandomEnum_returns_enum_value(self): + def test_pick_enum_returns_enum_value(self): random_value = random.randint(0, 99999) class Enum: VALUE = random_value OTHER_VALUE = random_value + 3 - self.assertIn( - factory.getRandomEnum(Enum), [Enum.VALUE, Enum.OTHER_VALUE]) + self.assertIn(factory.pick_enum(Enum), [Enum.VALUE, Enum.OTHER_VALUE]) - def test_getRandomEnum_can_exclude_choices(self): + def test_pick_enum_can_exclude_choices(self): random_value = random.randint(0, 99999) class Enum: @@ -70,118 +48,102 @@ self.assertEqual( Enum.FIRST_VALUE, - factory.getRandomEnum( + factory.pick_enum( Enum, but_not=(Enum.SECOND_VALUE, Enum.THIRD_VALUE))) - def test_getRandomChoice_chooses_from_django_options(self): + def test_pick_choice_chooses_from_django_options(self): options = [(2, 'b'), (10, 'j')] self.assertIn( - factory.getRandomChoice(options), + factory.pick_choice(options), [option[0] for option in options]) - def test_getRandomChoice_can_exclude_choices(self): + def test_pick_choice_can_exclude_choices(self): options = [(2, 'b'), (10, 'j')] but_not = [2] self.assertEqual( - 10, factory.getRandomChoice(options, but_not=but_not)) + 10, factory.pick_choice(options, but_not=but_not)) - def test_make_node_creates_nodegroup_if_none_given(self): + def test_make_Node_creates_nodegroup_if_none_given(self): existing_nodegroup_ids = set( nodegroup.id for nodegroup in NodeGroup.objects.all()) - new_node = factory.make_node() + new_node = factory.make_Node() 
self.assertIsNotNone(new_node.nodegroup) self.assertNotIn(new_node.nodegroup.id, existing_nodegroup_ids) - def test_make_node_uses_given_nodegroup(self): - nodegroup = factory.make_node_group() + def test_make_Node_uses_given_nodegroup(self): + nodegroup = factory.make_NodeGroup() self.assertEqual( - nodegroup, factory.make_node(nodegroup=nodegroup).nodegroup) + nodegroup, factory.make_Node(nodegroup=nodegroup).nodegroup) - def test_make_zone_returns_physical_zone(self): - self.assertIsNotNone(factory.make_zone()) + def test_make_Zone_returns_physical_zone(self): + self.assertIsNotNone(factory.make_Zone()) - def test_make_zone_assigns_name(self): - name = factory.make_zone().name + def test_make_Zone_assigns_name(self): + name = factory.make_Zone().name self.assertIsNotNone(name) self.assertNotEqual(0, len(name)) - def test_make_zone_returns_unique_zone(self): - self.assertNotEqual(factory.make_zone(), factory.make_zone()) + def test_make_Zone_returns_unique_zone(self): + self.assertNotEqual(factory.make_Zone(), factory.make_Zone()) - def test_make_zone_adds_nodes(self): - node = factory.make_node() - zone = factory.make_zone(nodes=[node]) + def test_make_Zone_adds_nodes(self): + node = factory.make_Node() + zone = factory.make_Zone(nodes=[node]) node = reload_object(node) self.assertEqual(zone, node.zone) - def test_make_zone_does_not_add_other_nodes(self): - previous_zone = factory.make_zone() - node = factory.make_node(zone=previous_zone) - factory.make_zone(nodes=[factory.make_node()]) + def test_make_Zone_does_not_add_other_nodes(self): + previous_zone = factory.make_Zone() + node = factory.make_Node(zone=previous_zone) + factory.make_Zone(nodes=[factory.make_Node()]) node = reload_object(node) self.assertEqual(previous_zone, node.zone) - def test_make_zone_adds_no_nodes_by_default(self): - previous_zone = factory.make_zone() - node = factory.make_node(zone=previous_zone) - factory.make_zone() + def test_make_Zone_adds_no_nodes_by_default(self): + 
previous_zone = factory.make_Zone() + node = factory.make_Node(zone=previous_zone) + factory.make_Zone() node = reload_object(node) self.assertEqual(previous_zone, node.zone) - def test_make_vlan_tag_excludes_None_by_default(self): - # Artificially limit randint to a very narrow range, to guarantee - # some repetition in its output, and virtually guarantee that we test - # both outcomes of the flip-a-coin call in make_vlan_tag. - self.patch(random, 'randint', FakeRandInt(random.randint, 0, 1)) - outcomes = {factory.make_vlan_tag() for _ in range(1000)} - self.assertEqual({1}, outcomes) - - def test_make_vlan_tags_includes_None_if_allow_none(self): - self.patch(random, 'randint', FakeRandInt(random.randint, 0, 1)) - self.assertEqual( - {None, 1}, - { - factory.make_vlan_tag(allow_none=True) - for _ in range(1000) - }) - - def test_make_network_lowers_names_if_sortable_name(self): - networks = factory.make_networks(10, sortable_name=True) + def test_make_Networks_lowers_names_if_sortable_name(self): + networks = factory.make_Networks(10, sortable_name=True) self.assertEqual( [network.name.lower() for network in networks], [network.name for network in networks]) - def test_make_networks_generates_desired_number_of_networks(self): + def test_make_Networks_generates_desired_number_of_networks(self): number = random.randint(1, 20) - networks = factory.make_networks(number) + networks = factory.make_Networks(number) self.assertEqual(number, len(networks)) self.assertIsInstance(networks[0], Network) self.assertIsInstance(networks[-1], Network) - def test_make_networks_passes_on_keyword_arguments(self): - description = factory.getRandomString() - [network] = factory.make_networks(1, description=description) + def test_make_Networks_passes_on_keyword_arguments(self): + description = factory.make_string() + [network] = factory.make_Networks(1, description=description) self.assertEqual(description, network.description) - def test_make_networks_includes_VLANs_by_default(self): 
+ def test_make_Networks_includes_VLANs_by_default(self): class FakeNetwork: def __init__(self, vlan_tag, *args, **kwargs): self.vlan_tag = vlan_tag - self.patch(factory, 'make_network', FakeNetwork) + self.patch(factory, 'make_Network', FakeNetwork) self.patch(random, 'randint', FakeRandInt(random.randint, 0, 1)) - networks = factory.make_networks(100) + networks = factory.make_Networks(100) self.assertEqual({None, 1}, {network.vlan_tag for network in networks}) - def test_make_networks_excludes_VLANs_if_not_with_vlans(self): + def test_make_Networks_excludes_VLANs_if_not_with_vlans(self): class FakeNetwork: def __init__(self, vlan_tag, *args, **kwargs): self.vlan_tag = vlan_tag - self.patch(factory, 'make_network', FakeNetwork) + self.patch(factory, 'make_Network', FakeNetwork) self.patch(random, 'randint', FakeRandInt(random.randint, 0, 1)) - networks = factory.make_networks(100, with_vlans=False) + networks = factory.make_Networks(100, with_vlans=False) self.assertEqual({None}, {network.vlan_tag for network in networks}) - def test_make_networks_gives_up_if_random_tags_keep_clashing(self): - self.patch(factory, 'make_network') + def test_make_Networks_gives_up_if_random_tags_keep_clashing(self): + self.patch(factory, 'make_Network') self.patch(random, 'randint', lambda *args: 1) - self.assertRaises(TooManyRandomRetries, factory.make_networks, 2) + self.patch(factory, 'pick_bool', lambda *args: False) + self.assertRaises(TooManyRandomRetries, factory.make_Networks, 2) diff -Nru maas-1.5.4+bzr2294/src/maasserver/testing/tests/test_module.py maas-1.7.6+bzr3376/src/maasserver/testing/tests/test_module.py --- maas-1.5.4+bzr2294/src/maasserver/testing/tests/test_module.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/testing/tests/test_module.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. 
This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.testing`.""" @@ -27,10 +27,12 @@ from maasserver.testing import ( extract_redirect, NoReceivers, + ) +from maasserver.testing.factory import factory +from maasserver.testing.orm import ( reload_object, reload_objects, ) -from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.testing.tests.models import TestModel from maastesting.djangotestcase import TestModelMixin @@ -51,19 +53,19 @@ app = 'maasserver.testing.tests' def test_extract_redirect_extracts_redirect_location(self): - url = factory.getRandomString() + url = factory.make_string() self.assertEqual( url, extract_redirect(HttpResponseRedirect(url))) def test_extract_redirect_only_returns_target_path(self): - url_path = factory.getRandomString() + url_path = factory.make_string() self.assertEqual( "/%s" % url_path, extract_redirect( HttpResponseRedirect("http://example.com/%s" % url_path))) def test_extract_redirect_errors_out_helpfully_if_not_a_redirect(self): - content = factory.getRandomString(10) + content = factory.make_string(10) other_response = HttpResponse(status=httplib.OK, content=content) try: extract_redirect(other_response) diff -Nru maas-1.5.4+bzr2294/src/maasserver/testing/tests/test_rabbit.py maas-1.7.6+bzr3376/src/maasserver/testing/tests/test_rabbit.py --- maas-1.5.4+bzr2294/src/maasserver/testing/tests/test_rabbit.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/testing/tests/test_rabbit.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for `maastesting.rabbit`.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from django.conf import settings -from maasserver.testing.rabbit import RabbitServerSettings -from maastesting.factory import factory -from maastesting.testcase import MAASTestCase -from rabbitfixture.server import RabbitServerResources - - -class TestRabbitServerSettings(MAASTestCase): - - def test_patch(self): - config = RabbitServerResources( - hostname=factory.getRandomString(), - port=factory.getRandomPort(), - dist_port=factory.getRandomPort()) - self.useFixture(config) - self.useFixture(RabbitServerSettings(config)) - self.assertEqual( - "%s:%d" % (config.hostname, config.port), - settings.RABBITMQ_HOST) - self.assertEqual("guest", settings.RABBITMQ_PASSWORD) - self.assertEqual("guest", settings.RABBITMQ_USERID) - self.assertEqual("/", settings.RABBITMQ_VIRTUAL_HOST) - self.assertTrue(settings.RABBITMQ_PUBLISH) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/data/test_x509_0.pem maas-1.7.6+bzr3376/src/maasserver/tests/data/test_x509_0.pem --- maas-1.5.4+bzr2294/src/maasserver/tests/data/test_x509_0.pem 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/data/test_x509_0.pem 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC7zCCAdegAwIBAgIJAIe2LoljdfboMA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV +BAMTBWJsYWtlMB4XDTE0MDQyNTE4MzIzNloXDTI0MDQyMjE4MzIzNlowEDEOMAwG +A1UEAxMFYmxha2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9B9r6 +4tFkbzu71iq9NYAr0u7BkybolU7V86Zpvg59MPzjW7zAf17N8hKRGfMYWJbgp3Wf +NNbq1Bys+4BBDQn+nRkr1H8bAuzz3+HjhX/InjGyBm51MGaqtO4nJk/PX9s4qurX +BTWhg/X/S0u7P+oqoUY3idLxAOKuZGrMIzUhikxew35QcDY3aiLjtrKN6R1SM+8S +PQR0aOoGdMpXkAOA/zPEU6qYCaXfI56/TrxEBFohffTuTVZxOLv3OoN/2NkwxYCN +sSLV/gAeX3Xi49K9++cDcWPK+I1t3uQ1psfWIjQMhwyUkOO7NHMOEtNCWbTvr/4w +q8tTMAFQVCjJ9nxNAgMBAAGjTDBKMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDMGA1Ud 
+EQQsMCqgKAYKKwYBBAGCNxQCA6AaDBhibGFrZUBibGFrZS11Ym50LWRlc2t0b3Aw +DQYJKoZIhvcNAQELBQADggEBABMLAl8asMyyMKFKMov9+mNkNvXB2KroJbjGNTxd +b/ZFrwunYx+TAiYLy0/mnrJTHAl49+Xbr+kYsHSh4Bkj4TgXLBPulcbc7oM/KYuE +YP8SK556f3IWqylYXaUmZBK4VOUKMYO/r2ZHbaaIm5DQGCVt9RGE5Nz22xGepJ4M +FpK9fFx2PO17zEwCv32EAyUeTmnLt62g+V1ch4tQdoYT/O5KnYhh3dcFGCLPiOHZ +JVZ/qG6DCvOYfYeU7oGtia9Uu+ClrS+nCTofYQYYFE0WKqGoS5tIxgRWjOUdvHrU +5dZFN3ODiH4KVk+B+VJfRngiMiJLKrD5/FAojThTGInIi9g= +-----END CERTIFICATE----- diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/data/test_x509_1.pem maas-1.7.6+bzr3376/src/maasserver/tests/data/test_x509_1.pem --- maas-1.5.4+bzr2294/src/maasserver/tests/data/test_x509_1.pem 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/data/test_x509_1.pem 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC7zCCAdegAwIBAgIJAKcZaoHYOg4cMA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV +BAMTBWJsYWtlMB4XDTE0MDQyNTE4MzQyM1oXDTI0MDQyMjE4MzQyM1owEDEOMAwG +A1UEAxMFYmxha2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCq0wOT +PL2ZhtcU/1wzO7bVDRW3Me3SU/SqtDSgPT57CcrS2QM2LdjOO2DBedxNQI6X1nFb +orOGgYu1xxX0Hs7KAnwyHVwsks9d+zNw5y0A8qN1Ml+s4+h6lobVGpL60rEfP2dG +RFfjU5drbYLHLkf71Et46TdSskb9qoygs7XoPothx/t38C5HtwzUXupAge4BJR47 +yvBvbrkC2HZrmsKhlqnJuuXWVaUXfAAmBkLVncBPYBZci/lpvhWACcK3ZP1Fq28c +3VS+OnHFe8MNiiI34jHRtct/NHwDMBvsfo0Jx2fBa8RsW+WtmU0AvgU/7Nz+uHe2 +KlKf2odJ3Wh2suqHAgMBAAGjTDBKMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDMGA1Ud +EQQsMCqgKAYKKwYBBAGCNxQCA6AaDBhibGFrZUBibGFrZS11Ym50LWRlc2t0b3Aw +DQYJKoZIhvcNAQELBQADggEBAITOKn0wNRQ921gpRfLMjXnV8lbVjGqSlCDXjwxk +LahYH0q9zs4GTjKomz5QOPLrZ1bTum9DcbHxiWs+gcwCAqZ3uxFAyKWi5jJSGZmU +ZY/cUxqVNkoCTRBVjjAVw+zrgJE1EI5BIe2tHMmU/+9oIOKq6BCcEQgT75FxyPqf +cT0YR9k2qeTDN7TrdZ1uBZLfQFKRP4ILfzOrW3e9aHdJ5M1fvQ4pVfu0/FI3+wrz +HE66X0Ct66iemU7Ey5HoqlRsLJpfUO2ZRN569fbK3v7NG5Fuz1ihD7ZSn5P5wgKU +nSh9IfPKLuSuJNVK9ivLt2LNPQ172LkHlwscoDq0JhWMwj0= +-----END CERTIFICATE----- diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/data/test_x509_2.pem 
maas-1.7.6+bzr3376/src/maasserver/tests/data/test_x509_2.pem --- maas-1.5.4+bzr2294/src/maasserver/tests/data/test_x509_2.pem 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/data/test_x509_2.pem 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC7zCCAdegAwIBAgIJAMX+Jd4nv6LJMA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV +BAMTBWJsYWtlMB4XDTE0MDQyNTE4MzQzMFoXDTI0MDQyMjE4MzQzMFowEDEOMAwG +A1UEAxMFYmxha2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC7Bc72 +dtZN/qfX9SOhJqmK1GLOopM+oh12Wqnl4mqKHfeJkbqzLj7zeCLzuFGbQJAQKTOP +C8PUaJege4kg9Z3txuIaR195aULc33Q2cVXXNgTCv9DohRcNn5D+rhaUTw39OJS9 +rbUPIdSHgH0yXvwbPcp22Qpky6WM34HEW3t3naod110aLIGCDE8QkTRztB364UWX +IPIaKHMciTN0A1EOx/BaId6p4uHIcR7KanVbiT7F12RJ355RmcIM4t4GNSESRpei +MiBbBBD50/IxgYqSuyIPQjFnGCZKuR2whu1AmrtpeECQEqaJvtg8vRI9sJUSw1o1 +93aowHY7hZzylp4jAgMBAAGjTDBKMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDMGA1Ud +EQQsMCqgKAYKKwYBBAGCNxQCA6AaDBhibGFrZUBibGFrZS11Ym50LWRlc2t0b3Aw +DQYJKoZIhvcNAQELBQADggEBAAOsz6/joMygbZrtqpTdtl31Bf9/s3/xjhRpQqjK +9obPR/URJjhmZdGjY6bnNDDn1buRXmQqUHr20xPg+iOj2p+XOivJwFt6fV1j0Yyx +oeVpzNtKVU35GF2QtLsbOSsYa/IEXP9v3BNGWgB1YhjRca7WzN57+qDQ/FiZ2ozM +5bJe5FxIkrJVRpR9XrD/wDXiIUv0GS54fNfFdcFEo8drfmbr2OO5y0dofREf10sX +WDtyKeoyB+T/0rQ/3GpWmDoCApb1kMbwtNX75hoNoQyUbYsAROpb3k400buw2I+S +apkLbtE0tPklh9vP0buo0IohzGlPlE9W1D1dIMqfnX432GM= +-----END CERTIFICATE----- diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/data/test_x509_3.pem maas-1.7.6+bzr3376/src/maasserver/tests/data/test_x509_3.pem --- maas-1.5.4+bzr2294/src/maasserver/tests/data/test_x509_3.pem 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/data/test_x509_3.pem 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC7zCCAdegAwIBAgIJAKmOFdKgRIj7MA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV +BAMTBWJsYWtlMB4XDTE0MDQyNTE4MzQzN1oXDTI0MDQyMjE4MzQzN1owEDEOMAwG +A1UEAxMFYmxha2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDhqRd3 
+RiNMevSD5onqBcHncE274DYm4xMJdFKVe3kCrzfl987g6CWsz+kZxqJLzC2DSa5Y +6iznEAL97MCBaBtffHtZi3SJa9mLSZO8+XvHqNpmQK+hNXidX3DJIrpfXQNqSgI3 +s8Rmd17qSlpFLy6wA83Df3+qtc6RZp2CTxKXPvbuXpPvQJKfK9yGpLlKE+IlzFap +Ed3GWB9pSDlpxeMM8bQrhnJBx6rLCOTFyKhHUcgzgHCbqwV7Pqc6y/ctuL4schvO +bJj74SKI78ORdfT15g2yqgXNZPAsHl911PfidRVBWk68MPNoRVi2poRCo9sCFkmo +TDhs0tcZD759XBOXAgMBAAGjTDBKMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDMGA1Ud +EQQsMCqgKAYKKwYBBAGCNxQCA6AaDBhibGFrZUBibGFrZS11Ym50LWRlc2t0b3Aw +DQYJKoZIhvcNAQELBQADggEBAJDwBbfSHiZLoALB+IOcLlEGyLQe8AUXtaWicSs3 +8gONQNdJqAhNJ9rcdQq6qdgDS10ZN94ZkoU9XXL6IS8HSagO6wBGCkyjOak9zePO +1iOPQm7qbqzbXXXamVSq1AIFz3VTILEfi+q/PYx62ztRYINNHJSWz+hR6f7Rz4D8 +koXMES4ElG5iGcMcnaEHdWBolr9HlCnDX8XuQ6cAOGbUbSmj6V6wuKvOmuHSgjJx +rxsehEkgZi/EVJNkwvkLVJ00R3aL6lINHJa/Qdza17EGR5QZdpCeUV7gbjmu8nut +Ctlxs+zXXhO6wwfZwzQHNN5U/RPPfxbgsN7PKncE0upuCmI= +-----END CERTIFICATE----- diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/data/test_x509_4.pem maas-1.7.6+bzr3376/src/maasserver/tests/data/test_x509_4.pem --- maas-1.5.4+bzr2294/src/maasserver/tests/data/test_x509_4.pem 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/data/test_x509_4.pem 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC7zCCAdegAwIBAgIJAMMCNW5S4V3wMA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV +BAMTBWJsYWtlMB4XDTE0MDQyNTE4MzQ0MloXDTI0MDQyMjE4MzQ0MlowEDEOMAwG +A1UEAxMFYmxha2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAWBwK +zS/ZdPBvGrGLUPn51EenzrVuB/gzoMDllZyRl9nnPPNTOdlMbiUQXkUp01Tcum5X +GWvhU0+xnlhyeDqAseOMCJcnm6Q6wjSj73QUST4Jmc5u+LzhPr3gzpSXl4vVJ9dD +xyEufCll/lzUIEgDSUzpvuXO+jXN/nlnHWLTJ84qFtoO13J8sh9Zh46mo9r3yKqe +0Z+a1O8Y2/ru6zR/2Yipi6PiQVne8ZSbwU9zgJRI3JAWHvFfdmuoy2FMvp4s54M7 +sUBom+xhl6/zHC0kgAl7V9j/8JGoiNU3J5OBZ1EoqiYz/7u+7sQ/YACrF0YGqRI2 +etYCLrqJpCC/Zkm3AgMBAAGjTDBKMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDMGA1Ud +EQQsMCqgKAYKKwYBBAGCNxQCA6AaDBhibGFrZUBibGFrZS11Ym50LWRlc2t0b3Aw +DQYJKoZIhvcNAQELBQADggEBAA2MolplJeMBUv/7ydQ8O5BqbNqTyyk0MUmYFvQG 
+EZlXtWgLk0RCX6vyQVGbw50NibOYXLyZDdEb4RCfswpPnY4aJEr/PXbn7zDAsG+I +vtl3zhOsNZhaDF9WNKZjvpd4NmXOLj8nk+EW+MKwxgUab1jMKcQiodBdtGQ8HzEk +6OKrj2nxZweDe6jC4hH9E19FF7wOsc4zOFyVNtdhPkdgEv/ksTVkKf+fXH62t18P +OWB0i33U0tONAL+cd+ssED/LBrRMotcQ+W/lN+a3O2Z1+YoKz3YpJkytYOdkQzHn +JMRGocMDS4Rcv8QdhB9uKHBxEKIOSugauWuDR4epOHItqp8= +-----END CERTIFICATE----- diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/models.py maas-1.7.6+bzr3376/src/maasserver/tests/models.py --- maas-1.5.4+bzr2294/src/maasserver/tests/models.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/models.py 2015-07-10 01:27:14.000000000 +0000 @@ -13,6 +13,7 @@ __metaclass__ = type __all__ = [ + 'MAASIPAddressFieldModel', 'JSONFieldModel', 'FieldChangeTestModel', ] @@ -24,6 +25,8 @@ ) from maasserver.fields import ( JSONObjectField, + LargeObjectField, + MAASIPAddressField, XMLField, ) from maasserver.models.managers import BulkManager @@ -67,3 +70,12 @@ parent = ForeignKey('BulkManagerParentTestModel', editable=False) objects = BulkManager() + + +class MAASIPAddressFieldModel(Model): + ip_address = MAASIPAddressField() + + +class LargeObjectFieldModel(Model): + name = CharField(max_length=255, unique=False) + large_object = LargeObjectField(block_size=10) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_auth.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_auth.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_auth.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_auth.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Test `api_auth` module.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from maasserver import api_auth -from maasserver.testing.factory import factory -from maastesting.testcase import MAASTestCase -from oauth import oauth -from testtools.matchers import Contains - - -class TestOAuthUnauthorized(MAASTestCase): - - def test_OAuthUnauthorized_repr_include_original_failure_message(self): - error_msg = factory.make_name('error-message') - original_exception = oauth.OAuthError(error_msg) - maas_exception = api_auth.OAuthUnauthorized(original_exception) - self.assertThat(unicode(maas_exception), Contains(error_msg)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_boot_images.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_boot_images.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_boot_images.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_boot_images.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,266 +0,0 @@ -# Copyright 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for the `Boot Images` API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -import json - -from apiclient.maas_client import MAASClient -from django.conf import settings -from django.core.urlresolvers import reverse -from fixtures import EnvironmentVariableFixture -from maasserver import api -from maasserver.api import ( - summarise_boot_image_dict, - summarise_boot_image_object, - ) -from maasserver.enum import ( - COMPONENT, - NODEGROUP_STATUS, - ) -from maasserver.models import ( - BootImage, - NodeGroup, - ) -from maasserver.refresh_worker import refresh_worker -from maasserver.testing import reload_object -from maasserver.testing.api import ( - APITestCase, - log_in_as_normal_user, - make_worker_client, - ) -from maasserver.testing.factory import factory -from maastesting.celery import CeleryFixture -from maastesting.matchers import MockCalledOnceWith -from mock import ( - ANY, - Mock, - ) -from provisioningserver import ( - boot_images, - tasks, - ) -from provisioningserver.boot import tftppath -from provisioningserver.testing.boot_images import make_boot_image_params -from testresources import FixtureResource -from testtools.matchers import MatchesStructure - - -def get_boot_image_uri(boot_image): - """Return a boot image's URI on the API.""" - return reverse( - 'boot_image_handler', - args=[boot_image.nodegroup.uuid, boot_image.id]) - - -class TestBootImageAPI(APITestCase): - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/nodegroups/uuid/boot-images/3/', - reverse('boot_image_handler', args=['uuid', '3'])) - - def test_GET_returns_boot_image(self): - boot_image = factory.make_boot_image() - response = self.client.get(get_boot_image_uri(boot_image)) - self.assertEqual(httplib.OK, response.status_code) - returned_boot_image = json.loads(response.content) - # The returned object contains a 'resource_uri' field. 
- self.assertEqual( - reverse( - 'boot_image_handler', - args=[boot_image.nodegroup.uuid, boot_image.id] - ), - returned_boot_image['resource_uri']) - # The other fields are the boot image's fields. - del returned_boot_image['resource_uri'] - self.assertThat( - boot_image, - MatchesStructure.byEquality(**returned_boot_image)) - - -class TestBootImagesAPI(APITestCase): - """Test the the boot images API.""" - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/nodegroups/uuid/boot-images/', - reverse('boot_images_handler', args=['uuid'])) - - def test_GET_returns_boot_image_list(self): - nodegroup = factory.make_node_group() - images = [ - factory.make_boot_image(nodegroup=nodegroup) for _ in range(3)] - # Create images in another nodegroup. - [factory.make_boot_image() for _ in range(3)] - response = self.client.get( - reverse('boot_images_handler', args=[nodegroup.uuid])) - self.assertEqual(httplib.OK, response.status_code, response.content) - parsed_result = json.loads(response.content) - self.assertItemsEqual( - [boot_image.id for boot_image in images], - [boot_image.get('id') for boot_image in parsed_result]) - - -class TestBootImagesReportImagesAPI(APITestCase): - """Test the method report_boot_images from the boot images API.""" - - resources = ( - ('celery', FixtureResource(CeleryFixture())), - ) - - def report_images(self, nodegroup, images, client=None): - if client is None: - client = self.client - return client.post( - reverse('boot_images_handler', args=[nodegroup.uuid]), { - 'images': json.dumps(images), - 'op': 'report_boot_images', - }) - - def test_summarise_boot_image_object_returns_tuple(self): - image = factory.make_boot_image() - self.assertEqual( - ( - image.architecture, - image.subarchitecture, - image.release, - image.label, - image.purpose, - ), - summarise_boot_image_object(image)) - - def test_summarise_boot_image_dict_returns_tuple(self): - image = make_boot_image_params() - self.assertEqual( - ( - image['architecture'], - 
image['subarchitecture'], - image['release'], - image['label'], - image['purpose'], - ), - summarise_boot_image_dict(image)) - - def test_summarise_boot_image_dict_substitutes_defaults(self): - image = make_boot_image_params() - del image['subarchitecture'] - del image['label'] - _, subarchitecture, _, label, _ = summarise_boot_image_dict(image) - self.assertEqual(('generic', 'release'), (subarchitecture, label)) - - def test_summarise_boot_image_functions_are_compatible(self): - image_dict = make_boot_image_params() - image_obj = factory.make_boot_image( - architecture=image_dict['architecture'], - subarchitecture=image_dict['subarchitecture'], - release=image_dict['release'], label=image_dict['label'], - purpose=image_dict['purpose']) - self.assertEqual( - summarise_boot_image_dict(image_dict), - summarise_boot_image_object(image_obj)) - - def test_report_boot_images_does_not_work_for_normal_user(self): - nodegroup = NodeGroup.objects.ensure_master() - log_in_as_normal_user(self.client) - response = self.report_images(nodegroup, []) - self.assertEqual( - httplib.FORBIDDEN, response.status_code, response.content) - - def test_report_boot_images_works_for_master_worker(self): - nodegroup = NodeGroup.objects.ensure_master() - client = make_worker_client(nodegroup) - response = self.report_images(nodegroup, [], client=client) - self.assertEqual(httplib.OK, response.status_code) - - def test_report_boot_images_stores_images(self): - nodegroup = NodeGroup.objects.ensure_master() - image = make_boot_image_params() - client = make_worker_client(nodegroup) - response = self.report_images(nodegroup, [image], client=client) - self.assertEqual( - (httplib.OK, "OK"), - (response.status_code, response.content)) - self.assertTrue( - BootImage.objects.have_image(nodegroup=nodegroup, **image)) - - def test_report_boot_images_removes_unreported_images(self): - deleted_image = factory.make_boot_image() - nodegroup = deleted_image.nodegroup - client = make_worker_client(nodegroup) - 
response = self.report_images(nodegroup, [], client=client) - self.assertEqual(httplib.OK, response.status_code) - self.assertIsNone(reload_object(deleted_image)) - - def test_report_boot_images_keeps_known_images(self): - nodegroup = factory.make_node_group() - image = make_boot_image_params() - client = make_worker_client(nodegroup) - response = self.report_images(nodegroup, [image], client=client) - self.assertEqual(httplib.OK, response.status_code) - known_image = BootImage.objects.get(nodegroup=nodegroup) - response = self.report_images(nodegroup, [image], client=client) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(known_image, reload_object(known_image)) - - def test_report_boot_images_ignores_images_for_other_nodegroups(self): - unrelated_image = factory.make_boot_image() - deleted_image = factory.make_boot_image() - nodegroup = deleted_image.nodegroup - client = make_worker_client(nodegroup) - response = self.report_images(nodegroup, [], client=client) - self.assertEqual(httplib.OK, response.status_code) - self.assertIsNotNone(reload_object(unrelated_image)) - - def test_report_boot_images_ignores_unknown_image_properties(self): - nodegroup = NodeGroup.objects.ensure_master() - image = make_boot_image_params() - image['nonesuch'] = factory.make_name('nonesuch'), - client = make_worker_client(nodegroup) - response = self.report_images(nodegroup, [image], client=client) - self.assertEqual( - (httplib.OK, "OK"), - (response.status_code, response.content)) - - def test_report_boot_images_warns_about_missing_boot_images(self): - register_error = self.patch(api, 'register_persistent_error') - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) - response = self.report_images( - nodegroup, [], client=make_worker_client(nodegroup)) - self.assertEqual(httplib.OK, response.status_code) - self.assertThat( - register_error, - MockCalledOnceWith(COMPONENT.IMPORT_PXE_FILES, ANY)) - - def 
test_worker_calls_report_boot_images(self): - # report_boot_images() uses the report_boot_images op on the nodes - # handlers to send image information. - self.useFixture( - EnvironmentVariableFixture("MAAS_URL", settings.DEFAULT_MAAS_URL)) - refresh_worker(NodeGroup.objects.ensure_master()) - self.patch(MAASClient, 'post') - self.patch(tftppath, 'list_boot_images', Mock(return_value=[])) - nodegroup_uuid = factory.make_name('uuid') - get_cluster_uuid = self.patch(boot_images, "get_cluster_uuid") - get_cluster_uuid.return_value = nodegroup_uuid - - tasks.report_boot_images.delay() - - # We're not concerned about the payload (images) here; - # this is tested in provisioningserver.tests.test_boot_images. - MAASClient.post.assert_called_once_with( - path=reverse( - 'boot_images_handler', args=[nodegroup_uuid]).lstrip('/'), - op='report_boot_images', images=ANY) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_commissioning.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_commissioning.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_commissioning.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_commissioning.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,294 +0,0 @@ -# Copyright 2013-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for the commissioning-related portions of the MAAS API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from base64 import b64encode -from datetime import ( - datetime, - timedelta, - ) -import httplib -import json - -from django.conf import settings -from django.core.urlresolvers import reverse -from maasserver.enum import NODE_STATUS -from maasserver.testing import reload_object -from maasserver.testing.api import APITestCase -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase -from maastesting.utils import sample_binary_data -from metadataserver.models import CommissioningScript - - -class TestCommissioningTimeout(MAASServerTestCase): - """Testing of commissioning timeout API.""" - - def test_check_with_no_action(self): - self.client_log_in() - node = factory.make_node(status=NODE_STATUS.READY) - response = self.client.post( - reverse('nodes_handler'), {'op': 'check_commissioning'}) - # Anything that's not commissioning should be ignored. - node = reload_object(node) - self.assertEqual( - (httplib.OK, NODE_STATUS.READY), - (response.status_code, node.status)) - - def test_check_with_commissioning_but_not_expired_node(self): - self.client_log_in() - node = factory.make_node( - status=NODE_STATUS.COMMISSIONING) - response = self.client.post( - reverse('nodes_handler'), {'op': 'check_commissioning'}) - node = reload_object(node) - self.assertEqual( - (httplib.OK, NODE_STATUS.COMMISSIONING), - (response.status_code, node.status)) - - def test_check_with_commissioning_and_expired_node(self): - self.client_log_in() - # Have an interval 1 second longer than the timeout. 
- interval = timedelta(seconds=1, minutes=settings.COMMISSIONING_TIMEOUT) - updated_at = datetime.now() - interval - node = factory.make_node( - status=NODE_STATUS.COMMISSIONING, created=datetime.now(), - updated=updated_at) - - response = self.client.post( - reverse('nodes_handler'), {'op': 'check_commissioning'}) - self.assertEqual( - ( - httplib.OK, - NODE_STATUS.FAILED_TESTS, - [node.system_id] - ), - ( - response.status_code, - reload_object(node).status, - [response_node['system_id'] - for response_node in json.loads(response.content)], - )) - - -class AdminCommissioningScriptsAPITest(MAASServerTestCase): - """Tests for `CommissioningScriptsHandler`.""" - - def get_url(self): - return reverse('commissioning_scripts_handler') - - def test_GET_lists_commissioning_scripts(self): - self.client_log_in(as_admin=True) - # Use lower-case names. The database and the test may use - # different collation orders with different ideas about case - # sensitivity. - names = {factory.make_name('script').lower() for counter in range(5)} - for name in names: - factory.make_commissioning_script(name=name) - - response = self.client.get(self.get_url()) - - self.assertEqual( - (httplib.OK, sorted(names)), - (response.status_code, json.loads(response.content))) - - def test_POST_creates_commissioning_script(self): - self.client_log_in(as_admin=True) - # This uses Piston's built-in POST code, so there are no tests for - # corner cases (like "script already exists") here. - name = factory.make_name('script') - content = factory.getRandomBytes() - - # Every uploaded file also has a name. But this is completely - # unrelated to the name we give to the commissioning script. 
- response = self.client.post( - self.get_url(), - { - 'name': name, - 'content': factory.make_file_upload(content=content), - }) - self.assertEqual(httplib.OK, response.status_code) - - returned_script = json.loads(response.content) - self.assertEqual( - (name, b64encode(content).decode("ascii")), - (returned_script['name'], returned_script['content'])) - - stored_script = CommissioningScript.objects.get(name=name) - self.assertEqual(content, stored_script.content) - - -class CommissioningScriptsAPITest(APITestCase): - - def get_url(self): - return reverse('commissioning_scripts_handler') - - def test_GET_is_forbidden(self): - response = self.client.get(self.get_url()) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_POST_is_forbidden(self): - response = self.client.post( - self.get_url(), - {'name': factory.make_name('script')}) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - -class AdminCommissioningScriptAPITest(MAASServerTestCase): - """Tests for `CommissioningScriptHandler`.""" - - def get_url(self, script_name): - return reverse('commissioning_script_handler', args=[script_name]) - - def test_GET_returns_script_contents(self): - self.client_log_in(as_admin=True) - script = factory.make_commissioning_script() - response = self.client.get(self.get_url(script.name)) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(script.content, response.content) - - def test_GET_preserves_binary_data(self): - self.client_log_in(as_admin=True) - script = factory.make_commissioning_script(content=sample_binary_data) - response = self.client.get(self.get_url(script.name)) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(sample_binary_data, response.content) - - def test_PUT_updates_contents(self): - self.client_log_in(as_admin=True) - old_content = b'old:%s' % factory.getRandomString().encode('ascii') - script = factory.make_commissioning_script(content=old_content) - new_content = b'new:%s' % 
factory.getRandomString().encode('ascii') - - response = self.client_put( - self.get_url(script.name), - {'content': factory.make_file_upload(content=new_content)}) - self.assertEqual(httplib.OK, response.status_code) - - self.assertEqual(new_content, reload_object(script).content) - - def test_DELETE_deletes_script(self): - self.client_log_in(as_admin=True) - script = factory.make_commissioning_script() - self.client.delete(self.get_url(script.name)) - self.assertItemsEqual( - [], - CommissioningScript.objects.filter(name=script.name)) - - -class CommissioningScriptAPITest(APITestCase): - - def get_url(self, script_name): - return reverse('commissioning_script_handler', args=[script_name]) - - def test_GET_is_forbidden(self): - # It's not inconceivable that commissioning scripts contain - # credentials of some sort. There is no need for regular users - # (consumers of the MAAS) to see these. - script = factory.make_commissioning_script() - response = self.client.get(self.get_url(script.name)) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_PUT_is_forbidden(self): - script = factory.make_commissioning_script() - response = self.client_put( - self.get_url(script.name), {'content': factory.getRandomString()}) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_DELETE_is_forbidden(self): - script = factory.make_commissioning_script() - response = self.client_put(self.get_url(script.name)) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - -class NodeCommissionResultHandlerAPITest(APITestCase): - - def test_list_returns_commissioning_results(self): - commissioning_results = [ - factory.make_node_commission_result() - for counter in range(3)] - url = reverse('commissioning_results_handler') - response = self.client.get(url, {'op': 'list'}) - self.assertEqual(httplib.OK, response.status_code, response.content) - parsed_results = json.loads(response.content) - self.assertItemsEqual( - [ - ( - 
commissioning_result.name, - commissioning_result.script_result, - b64encode(commissioning_result.data), - commissioning_result.node.system_id, - ) - for commissioning_result in commissioning_results - ], - [ - ( - result.get('name'), - result.get('script_result'), - result.get('data'), - result.get('node').get('system_id'), - ) - for result in parsed_results - ] - ) - - def test_list_can_be_filtered_by_node(self): - commissioning_results = [ - factory.make_node_commission_result() - for counter in range(3)] - url = reverse('commissioning_results_handler') - response = self.client.get( - url, - { - 'op': 'list', - 'system_id': [ - commissioning_results[0].node.system_id, - commissioning_results[1].node.system_id, - ], - } - ) - self.assertEqual(httplib.OK, response.status_code, response.content) - parsed_results = json.loads(response.content) - self.assertItemsEqual( - [b64encode(commissioning_results[0].data), - b64encode(commissioning_results[1].data)], - [result.get('data') for result in parsed_results]) - - def test_list_can_be_filtered_by_name(self): - commissioning_results = [ - factory.make_node_commission_result() - for counter in range(3)] - url = reverse('commissioning_results_handler') - response = self.client.get( - url, - { - 'op': 'list', - 'name': commissioning_results[0].name - } - ) - self.assertEqual(httplib.OK, response.status_code, response.content) - parsed_results = json.loads(response.content) - self.assertItemsEqual( - [b64encode(commissioning_results[0].data)], - [result.get('data') for result in parsed_results]) - - def test_list_displays_only_visible_nodes(self): - node = factory.make_node(owner=factory.make_user()) - factory.make_node_commission_result(node) - url = reverse('commissioning_results_handler') - response = self.client.get(url, {'op': 'list'}) - self.assertEqual(httplib.OK, response.status_code, response.content) - parsed_results = json.loads(response.content) - self.assertEqual([], parsed_results) diff -Nru 
maas-1.5.4+bzr2294/src/maasserver/tests/test_api_describe.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_describe.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_describe.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_describe.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,148 +0,0 @@ -# Copyright 2013-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for the `describe` view.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -import json -from operator import itemgetter -from urlparse import urlparse - -import django.core.urlresolvers -from django.core.urlresolvers import ( - reverse, - get_script_prefix, - ) -from django.test.client import RequestFactory -from maasserver.api import describe -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase -from testscenarios import multiply_scenarios -from testtools.matchers import ( - AfterPreprocessing, - AllMatch, - Contains, - Equals, - Is, - MatchesAll, - MatchesAny, - MatchesListwise, - MatchesStructure, - StartsWith, - ) - - -class TestDescribe(MAASServerTestCase): - """Tests for the `describe` view.""" - - def test_describe_returns_json(self): - response = self.client.get(reverse('describe')) - self.assertThat( - (response.status_code, - response['Content-Type'], - response.content, - response.content), - MatchesListwise( - (Equals(httplib.OK), - Equals("application/json"), - StartsWith(b'{'), - Contains('name'))), - response) - - def test_describe(self): - response = self.client.get(reverse('describe')) - description = json.loads(response.content) - self.assertSetEqual( - {"doc", "handlers", "resources"}, set(description)) - self.assertIsInstance(description["handlers"], list) - - -class 
TestDescribeAbsoluteURIs(MAASServerTestCase): - """Tests for the `describe` view's URI manipulation.""" - - scenarios_schemes = ( - ("http", dict(scheme="http")), - ("https", dict(scheme="https")), - ) - - scenarios_paths = ( - ("script-at-root", dict(script_name="", path_info="")), - ("script-below-root-1", dict(script_name="/foo/bar", path_info="")), - ("script-below-root-2", dict(script_name="/foo", path_info="/bar")), - ) - - scenarios = multiply_scenarios( - scenarios_schemes, scenarios_paths) - - def make_params(self): - """Create parameters for http request, based on current scenario.""" - return { - "PATH_INFO": self.path_info, - "SCRIPT_NAME": self.script_name, - "SERVER_NAME": factory.make_name('server').lower(), - "wsgi.url_scheme": self.scheme, - } - - def get_description(self, params): - """GET the API description (at a random API path), as JSON.""" - path = '/%s/describe' % factory.make_name('path') - request = RequestFactory().get(path, **params) - response = describe(request) - self.assertEqual( - httplib.OK, response.status_code, - "API description failed with code %s:\n%s" - % (response.status_code, response.content)) - return json.loads(response.content) - - def patch_script_prefix(self, script_name): - """Patch up Django's and Piston's notion of the script_name prefix. - - This manipulates how Piston gets Django's version of script_name - which it needs so that it can prefix script_name to URL paths. - """ - # Patching up get_script_prefix doesn't seem to do the trick, - # and patching it in the right module requires unwarranted - # intimacy with Piston. So just go through the proper call and - # set the prefix. But clean this up after the test or it will - # break other tests! 
- original_prefix = get_script_prefix() - self.addCleanup( - django.core.urlresolvers.set_script_prefix, original_prefix) - django.core.urlresolvers.set_script_prefix(script_name) - - def test_handler_uris_are_absolute(self): - params = self.make_params() - server = params['SERVER_NAME'] - - # Without this, the test wouldn't be able to detect accidental - # duplication of the script_name portion of the URL path: - # /MAAS/MAAS/api/... - self.patch_script_prefix(self.script_name) - - description = self.get_description(params) - - expected_uri = AfterPreprocessing( - urlparse, MatchesStructure( - scheme=Equals(self.scheme), hostname=Equals(server), - # The path is always the script name followed by "api/" - # because all API calls are within the "api" tree. - path=StartsWith(self.script_name + "/api/"))) - expected_handler = MatchesAny( - Is(None), AfterPreprocessing(itemgetter("uri"), expected_uri)) - expected_resource = MatchesAll( - AfterPreprocessing(itemgetter("anon"), expected_handler), - AfterPreprocessing(itemgetter("auth"), expected_handler)) - resources = description["resources"] - self.assertNotEqual([], resources) - self.assertThat(resources, AllMatch(expected_resource)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_apidoc.py maas-1.7.6+bzr3376/src/maasserver/tests/test_apidoc.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_apidoc.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_apidoc.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,308 +0,0 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Test maasserver API documentation functionality.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from inspect import getdoc -import new - -from django.conf import settings -from django.conf.urls import ( - include, - patterns, - url, - ) -from django.core.exceptions import ImproperlyConfigured -from django.core.urlresolvers import reverse -from maasserver.api_support import ( - operation, - OperationsHandler, - OperationsResource, - ) -from maasserver.apidoc import ( - describe_handler, - describe_resource, - find_api_resources, - generate_api_docs, - ) -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase -from mock import sentinel -from piston.doc import HandlerDocumentation -from piston.handler import BaseHandler -from piston.resource import Resource - - -class TestFindingResources(MAASServerTestCase): - """Tests for API inspection support: finding resources.""" - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/doc/', reverse('api-doc')) - - @staticmethod - def make_module(): - """Return a new module with a fabricated name.""" - name = factory.make_name("module").encode("ascii") - return new.module(name) - - def test_urlpatterns_empty(self): - # No resources are found in empty modules. - module = self.make_module() - module.urlpatterns = patterns("") - self.assertSetEqual(set(), find_api_resources(module)) - - def test_urlpatterns_not_present(self): - # The absence of urlpatterns is an error. - module = self.make_module() - self.assertRaises(ImproperlyConfigured, find_api_resources, module) - - def test_urlpatterns_with_resource_for_incomplete_handler(self): - # Resources for handlers that don't specify resource_uri are ignored. 
- module = self.make_module() - module.urlpatterns = patterns("", url("^foo", BaseHandler)) - self.assertSetEqual(set(), find_api_resources(module)) - - def test_urlpatterns_with_resource(self): - # Resources for handlers with resource_uri attributes are discovered - # in a urlconf module and returned. The type of resource_uri is not - # checked; it must only be present and not None. - handler = type(b"\m/", (BaseHandler,), {"resource_uri": True}) - resource = Resource(handler) - module = self.make_module() - module.urlpatterns = patterns("", url("^metal", resource)) - self.assertSetEqual({resource}, find_api_resources(module)) - - def test_nested_urlpatterns_with_handler(self): - # Resources are found in nested urlconfs. - handler = type(b"\m/", (BaseHandler,), {"resource_uri": True}) - resource = Resource(handler) - module = self.make_module() - submodule = self.make_module() - submodule.urlpatterns = patterns("", url("^metal", resource)) - module.urlpatterns = patterns("", ("^genre/", include(submodule))) - self.assertSetEqual({resource}, find_api_resources(module)) - - def test_smoke(self): - # Resources are found for the MAAS API. - from maasserver import urls_api as urlconf - self.assertNotEqual(set(), find_api_resources(urlconf)) - - -class TestGeneratingDocs(MAASServerTestCase): - """Tests for API inspection support: generating docs.""" - - @staticmethod - def make_resource(): - """ - Return a new `OperationsResource` with a `BaseHandler` subclass - handler, with a fabricated name and a `resource_uri` class-method. - """ - name = factory.make_name("handler").encode("ascii") - resource_uri = lambda cls: factory.make_name("resource-uri") - namespace = {"resource_uri": classmethod(resource_uri)} - handler = type(name, (BaseHandler,), namespace) - return OperationsResource(handler) - - def test_generates_doc_for_handler(self): - # generate_api_docs() yields HandlerDocumentation objects for the - # handlers passed in. 
- resource = self.make_resource() - docs = list(generate_api_docs([resource])) - self.assertEqual(1, len(docs)) - [doc] = docs - self.assertIsInstance(doc, HandlerDocumentation) - self.assertIs(type(resource.handler), doc.handler) - - def test_generates_doc_for_multiple_handlers(self): - # generate_api_docs() yields HandlerDocumentation objects for the - # handlers passed in. - resources = [self.make_resource() for _ in range(5)] - docs = list(generate_api_docs(resources)) - sorted_handlers = sorted( - [type(resource.handler) for resource in resources], - key=lambda handler_class: handler_class.__name__) - self.assertEqual( - sorted_handlers, - [doc.handler for doc in docs]) - - def test_handler_without_resource_uri(self): - # generate_api_docs() raises an exception if a handler does not have a - # resource_uri attribute. - resource = OperationsResource(BaseHandler) - docs = generate_api_docs([resource]) - error = self.assertRaises(AssertionError, list, docs) - self.assertEqual( - "Missing resource_uri in %s" % type(resource.handler).__name__, - unicode(error)) - - -class TestHandlers(MAASServerTestCase): - """Test that the handlers have all the details needed to generate the - API documentation. - """ - - def test_handlers_have_section_title(self): - from maasserver import urls_api as urlconf - resources = find_api_resources(urlconf) - handlers = [] - for doc in generate_api_docs(resources): - handlers.append(doc.handler) - handlers_missing_section_name = [ - handler.__name__ - for handler in handlers - if not hasattr(handler, 'api_doc_section_name') - ] - self.assertEqual( - [], handlers_missing_section_name, - "%d handlers are missing an api_doc_section_name field." % len( - handlers_missing_section_name)) - - -class ExampleHandler(OperationsHandler): - """An example handler.""" - - create = read = delete = None - - @operation(idempotent=False) - def non_idempotent_operation(self, request, p_foo, p_bar): - """A non-idempotent operation. 
- - Will piggyback on POST requests. - """ - - @operation(idempotent=True) - def idempotent_operation(self, request, p_foo, p_bar): - """An idempotent operation. - - Will piggyback on GET requests. - """ - - @classmethod - def resource_uri(cls): - # Note that the arguments, after request, to each of the ops - # above matches the parameters (index 1) in the tuple below. - return ("example_view", ["p_foo", "p_bar"]) - - -class ExampleFallbackHandler(OperationsHandler): - """An example fall-back handler.""" - - create = read = delete = update = None - - -class TestDescribingAPI(MAASServerTestCase): - """Tests for functions that describe a Piston API.""" - - def setUp(self): - super(TestDescribingAPI, self).setUp() - # Override DEFAULT_MAAS_URL so that it's stable for testing. - self.patch(settings, "DEFAULT_MAAS_URL", "http://example.com/") - - def test_describe_handler(self): - # describe_handler() returns a description of a handler that can be - # readily serialised into JSON, for example. - expected_actions = [ - {"doc": getdoc(ExampleHandler.idempotent_operation), - "method": "GET", - "name": "idempotent_operation", - "op": "idempotent_operation", - "restful": False}, - {"doc": getdoc(ExampleHandler.non_idempotent_operation), - "method": "POST", - "name": "non_idempotent_operation", - "op": "non_idempotent_operation", - "restful": False}, - {"doc": None, - "method": "PUT", - "name": "update", - "op": None, - "restful": True}, - ] - observed = describe_handler(ExampleHandler) - # The description contains several entries. 
- self.assertSetEqual( - {"actions", "doc", "name", "params", "path"}, - set(observed)) - self.assertEqual(ExampleHandler.__doc__, observed["doc"]) - self.assertEqual(ExampleHandler.__name__, observed["name"]) - self.assertEqual(["p_foo", "p_bar"], observed["params"]) - self.assertItemsEqual(expected_actions, observed["actions"]) - - def test_describe_handler_with_maas_handler(self): - # Ensure that describe_handler() yields something sensible with a - # "real" MAAS API handler. - from maasserver.api import NodeHandler as handler - description = describe_handler(handler) - # The RUD of CRUD actions are still available, but the C(reate) action - # has been overridden with custom non-ReSTful operations. - expected_actions = { - "DELETE delete op=None restful=True", - "GET read op=None restful=True", - "GET details op=details restful=False", - "POST start op=start restful=False", - "POST stop op=stop restful=False", - "POST release op=release restful=False", - "POST commission op=commission restful=False", - "PUT update op=None restful=True", - } - observed_actions = { - "%(method)s %(name)s op=%(op)s restful=%(restful)s" % action - for action in description["actions"] - } - self.assertSetEqual(expected_actions, observed_actions) - self.assertSetEqual({"system_id"}, set(description["params"])) - # The path is a URI Template , the - # components of which correspond to the parameters declared. - self.assertEqual( - "/api/1.0/nodes/{system_id}/", - description["path"]) - - def test_describe_resource_anonymous_resource(self): - # When the resource does not require authentication, any configured - # fallback is ignored, and only the resource's handler is described. - # The resource name comes from this handler. 
- self.patch(ExampleHandler, "anonymous", ExampleFallbackHandler) - resource = OperationsResource(ExampleHandler) - expected = { - "anon": describe_handler(ExampleHandler), - "auth": None, - "name": "ExampleHandler", - } - self.assertEqual(expected, describe_resource(resource)) - - def test_describe_resource_authenticated_resource(self): - # When the resource requires authentication, but has no fallback - # anonymous handler, the first is described. The resource name comes - # from this handler. - resource = OperationsResource(ExampleHandler, sentinel.auth) - expected = { - "anon": None, - "auth": describe_handler(ExampleHandler), - "name": "ExampleHandler", - } - self.assertEqual(expected, describe_resource(resource)) - - def test_describe_resource_authenticated_resource_with_fallback(self): - # When the resource requires authentication, but has a fallback - # anonymous handler, both are described. The resource name is taken - # from the authenticated handler. - self.patch(ExampleHandler, "anonymous", ExampleFallbackHandler) - resource = OperationsResource(ExampleHandler, sentinel.auth) - expected = { - "anon": describe_handler(ExampleFallbackHandler), - "auth": describe_handler(ExampleHandler), - "name": "ExampleHandler", - } - self.assertEqual(expected, describe_resource(resource)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_enlistment.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_enlistment.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_enlistment.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_enlistment.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,753 +0,0 @@ -# Copyright 2013-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for enlistment-related portions of the API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -import json - -from django.contrib.auth.models import AnonymousUser -from django.core.urlresolvers import reverse -from maasserver.enum import ( - NODE_STATUS, - NODEGROUP_STATUS, - NODEGROUPINTERFACE_MANAGEMENT, - ) -from maasserver.models import ( - Node, - NodeGroup, - ) -from maasserver.testing import reload_object -from maasserver.testing.api import MultipleUsersScenarios -from maasserver.testing.architecture import make_usable_architecture -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase -from maasserver.utils import strip_domain -from maasserver.utils.orm import get_one -from netaddr import IPNetwork - - -class EnlistmentAPITest(MultipleUsersScenarios, - MAASServerTestCase): - """Enlistment tests.""" - scenarios = [ - ('anon', dict(userfactory=lambda: AnonymousUser())), - ('user', dict(userfactory=factory.make_user)), - ('admin', dict(userfactory=factory.make_admin)), - ] - - def test_POST_new_creates_node(self): - architecture = make_usable_architecture(self) - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': 'diane', - 'architecture': architecture, - 'power_type': 'ether_wake', - 'mac_addresses': ['aa:bb:cc:dd:ee:ff', '22:bb:cc:dd:ee:ff'], - }) - - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertIn('application/json', response['Content-Type']) - self.assertEqual('diane', parsed_result['hostname']) - self.assertNotEqual(0, len(parsed_result.get('system_id'))) - [diane] = Node.objects.filter(hostname='diane') - self.assertEqual(architecture, diane.architecture) - - def test_POST_new_generates_hostname_if_ip_based_hostname(self): - hostname = 
'192-168-5-19.domain' - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': hostname, - 'architecture': make_usable_architecture(self), - 'power_type': 'ether_wake', - 'mac_addresses': [factory.getRandomMACAddress()], - }) - parsed_result = json.loads(response.content) - - self.assertEqual(httplib.OK, response.status_code) - system_id = parsed_result.get('system_id') - node = Node.objects.get(system_id=system_id) - self.assertNotEqual(hostname, node.hostname) - - def test_POST_new_creates_node_with_power_parameters(self): - # We're setting power parameters so we disable start_commissioning to - # prevent anything from attempting to issue power instructions. - self.patch(Node, "start_commissioning") - hostname = factory.make_name("hostname") - architecture = make_usable_architecture(self) - power_type = 'ipmi' - power_parameters = { - "power_user": factory.make_name("power-user"), - "power_pass": factory.make_name("power-pass"), - } - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': hostname, - 'architecture': architecture, - 'power_type': 'ether_wake', - 'mac_addresses': factory.getRandomMACAddress(), - 'power_parameters': json.dumps(power_parameters), - 'power_type': power_type, - }) - self.assertEqual(httplib.OK, response.status_code) - [node] = Node.objects.filter(hostname=hostname) - self.assertEqual(power_parameters, node.power_parameters) - self.assertEqual(power_type, node.power_type) - - def test_POST_new_creates_node_with_arch_only(self): - architecture = make_usable_architecture(self, subarch_name="generic") - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': 'diane', - 'architecture': architecture.split('/')[0], - 'power_type': 'ether_wake', - 'mac_addresses': ['aa:bb:cc:dd:ee:ff', '22:bb:cc:dd:ee:ff'], - }) - - self.assertEqual(httplib.OK, 
response.status_code) - parsed_result = json.loads(response.content) - self.assertIn('application/json', response['Content-Type']) - self.assertEqual('diane', parsed_result['hostname']) - self.assertNotEqual(0, len(parsed_result.get('system_id'))) - [diane] = Node.objects.filter(hostname='diane') - self.assertEqual(architecture, diane.architecture) - - def test_POST_new_creates_node_with_subarchitecture(self): - # The API allows a Node to be created. - architecture = make_usable_architecture(self) - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': 'diane', - 'architecture': architecture.split('/')[0], - 'subarchitecture': architecture.split('/')[1], - 'power_type': 'ether_wake', - 'mac_addresses': ['aa:bb:cc:dd:ee:ff', '22:bb:cc:dd:ee:ff'], - }) - - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertIn('application/json', response['Content-Type']) - self.assertEqual('diane', parsed_result['hostname']) - self.assertNotEqual(0, len(parsed_result.get('system_id'))) - [diane] = Node.objects.filter(hostname='diane') - self.assertEqual(architecture, diane.architecture) - - def test_POST_new_fails_node_with_double_subarchitecture(self): - architecture = make_usable_architecture(self) - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': 'diane', - 'architecture': architecture, - 'subarchitecture': architecture.split('/')[1], - 'mac_addresses': ['aa:bb:cc:dd:ee:ff', '22:bb:cc:dd:ee:ff'], - }) - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertIn('text/plain', response['Content-Type']) - self.assertEqual( - "Subarchitecture cannot be specified twice.", - response.content) - - def test_POST_new_associates_mac_addresses(self): - # The API allows a Node to be created and associated with MAC - # Addresses. 
- architecture = make_usable_architecture(self) - self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': 'diane', - 'architecture': architecture, - 'mac_addresses': ['aa:bb:cc:dd:ee:ff', '22:bb:cc:dd:ee:ff'], - }) - diane = get_one(Node.objects.filter(hostname='diane')) - self.assertItemsEqual( - ['aa:bb:cc:dd:ee:ff', '22:bb:cc:dd:ee:ff'], - [mac.mac_address for mac in diane.macaddress_set.all()]) - - def test_POST_new_initializes_nodegroup_to_master_by_default(self): - hostname = factory.make_name('host') - self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': hostname, - 'architecture': make_usable_architecture(self), - 'mac_addresses': [factory.getRandomMACAddress()], - }) - self.assertEqual( - NodeGroup.objects.ensure_master(), - Node.objects.get(hostname=hostname).nodegroup) - - def test_POST_with_no_hostname_auto_populates_hostname(self): - architecture = make_usable_architecture(self) - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'architecture': architecture, - 'power_type': 'ether_wake', - 'mac_addresses': [factory.getRandomMACAddress()], - }) - node = Node.objects.get( - system_id=json.loads(response.content)['system_id']) - self.assertEqual(5, len(strip_domain(node.hostname))) - - def test_POST_fails_without_operation(self): - # If there is no operation ('op=operation_name') specified in the - # request data, a 'Bad request' response is returned. 
- response = self.client.post( - reverse('nodes_handler'), - { - 'hostname': 'diane', - 'mac_addresses': ['aa:bb:cc:dd:ee:ff', 'invalid'], - }) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertIn('text/plain', response['Content-Type']) - self.assertEqual( - "Unrecognised signature: POST None", - response.content) - - def test_POST_new_fails_if_autodetect_nodegroup_required(self): - # If new() is called with no nodegroup, we require the client to - # explicitly also supply autodetect_nodegroup (with any value) - # to force the autodetection. If it's not supplied then an error - # is raised. - architecture = make_usable_architecture(self) - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'architecture': architecture, - 'power_type': 'ether_wake', - 'mac_addresses': [factory.getRandomMACAddress()], - }) - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertIn('text/plain', response['Content-Type']) - self.assertEqual( - "'autodetect_nodegroup' must be specified if 'nodegroup' " - "parameter missing", response.content) - - def test_POST_fails_if_mac_duplicated(self): - # Mac Addresses should be unique. - mac = 'aa:bb:cc:dd:ee:ff' - factory.make_mac_address(mac) - architecture = make_usable_architecture(self) - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'architecture': architecture, - 'hostname': factory.getRandomString(), - 'mac_addresses': [mac], - }) - parsed_result = json.loads(response.content) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertIn('application/json', response['Content-Type']) - self.assertEqual( - ["Mac address %s already in use." % mac], - parsed_result['mac_addresses']) - - def test_POST_fails_with_bad_operation(self): - # If the operation ('op=operation_name') specified in the - # request data is unknown, a 'Bad request' response is returned. 
- response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'invalid_operation', - 'autodetect_nodegroup': '1', - 'hostname': 'diane', - 'mac_addresses': ['aa:bb:cc:dd:ee:ff', 'invalid'], - }) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertEqual( - "Unrecognised signature: POST invalid_operation", - response.content) - - def test_POST_new_rejects_invalid_data(self): - # If the data provided to create a node with an invalid MAC - # Address, a 'Bad request' response is returned. - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': 'diane', - 'mac_addresses': ['aa:bb:cc:dd:ee:ff', 'invalid'], - }) - parsed_result = json.loads(response.content) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertIn('application/json', response['Content-Type']) - self.assertEqual( - ["One or more MAC addresses is invalid."], - parsed_result['mac_addresses']) - - def test_POST_invalid_architecture_returns_bad_request(self): - # If the architecture name provided to create a node is not a valid - # architecture name, a 'Bad request' response is returned. 
- response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': 'diane', - 'mac_addresses': ['aa:bb:cc:dd:ee:ff'], - 'architecture': 'invalid-architecture', - }) - parsed_result = json.loads(response.content) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertIn('application/json', response['Content-Type']) - self.assertItemsEqual(['architecture'], parsed_result) - - -class NodeHostnameEnlistmentTest(MultipleUsersScenarios, - MAASServerTestCase): - - scenarios = [ - ('anon', dict(userfactory=lambda: AnonymousUser())), - ('user', dict(userfactory=factory.make_user)), - ('admin', dict(userfactory=factory.make_admin)), - ] - - def test_created_node_has_domain_from_cluster(self): - hostname_without_domain = factory.make_name('hostname') - hostname_with_domain = '%s.%s' % ( - hostname_without_domain, factory.getRandomString()) - domain = factory.make_name('domain') - factory.make_node_group( - status=NODEGROUP_STATUS.ACCEPTED, - name=domain, - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': hostname_with_domain, - 'architecture': make_usable_architecture(self), - 'power_type': 'ether_wake', - 'mac_addresses': [factory.getRandomMACAddress()], - }) - self.assertEqual(httplib.OK, response.status_code, response.content) - parsed_result = json.loads(response.content) - expected_hostname = '%s.%s' % (hostname_without_domain, domain) - self.assertEqual( - expected_hostname, parsed_result.get('hostname')) - - def test_created_node_gets_domain_from_cluster_appended(self): - hostname_without_domain = factory.make_name('hostname') - domain = factory.make_name('domain') - factory.make_node_group( - status=NODEGROUP_STATUS.ACCEPTED, - name=domain, - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) - response = self.client.post( - reverse('nodes_handler'), - { - 
'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': hostname_without_domain, - 'architecture': make_usable_architecture(self), - 'power_type': 'ether_wake', - 'mac_addresses': [factory.getRandomMACAddress()], - }) - self.assertEqual(httplib.OK, response.status_code, response.content) - parsed_result = json.loads(response.content) - expected_hostname = '%s.%s' % (hostname_without_domain, domain) - self.assertEqual( - expected_hostname, parsed_result.get('hostname')) - - def test_created_node_nodegroup_is_inferred_from_origin_network(self): - network = IPNetwork('192.168.0.3/24') - origin_ip = factory.getRandomIPInNetwork(network) - NodeGroup.objects.ensure_master() - nodegroup = factory.make_node_group(network=network) - response = self.client.post( - reverse('nodes_handler'), - data={ - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': factory.make_name('hostname'), - 'architecture': make_usable_architecture(self), - 'power_type': 'ether_wake', - 'mac_addresses': [factory.getRandomMACAddress()], - }, - REMOTE_ADDR=origin_ip) - self.assertEqual(httplib.OK, response.status_code, response.content) - parsed_result = json.loads(response.content) - node = Node.objects.get(system_id=parsed_result.get('system_id')) - self.assertEqual(nodegroup, node.nodegroup) - - def test_created_node_uses_default_nodegroup_if_origin_not_found(self): - unknown_host = factory.make_name('host') - response = self.client.post( - reverse('nodes_handler'), - data={ - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': factory.make_name('hostname'), - 'architecture': make_usable_architecture(self), - 'power_type': 'ether_wake', - 'mac_addresses': [factory.getRandomMACAddress()], - }, - HTTP_HOST=unknown_host) - self.assertEqual(httplib.OK, response.status_code, response.content) - parsed_result = json.loads(response.content) - node = Node.objects.get(system_id=parsed_result.get('system_id')) - self.assertEqual(NodeGroup.objects.ensure_master(), node.nodegroup) - - -class 
NonAdminEnlistmentAPITest(MultipleUsersScenarios, - MAASServerTestCase): - # Enlistment tests for non-admin users. - - scenarios = [ - ('anon', dict(userfactory=lambda: AnonymousUser())), - ('user', dict(userfactory=factory.make_user)), - ] - - def test_POST_non_admin_creates_node_in_declared_state(self): - # Upon non-admin enlistment, a node goes into the Declared - # state. Deliberate approval is required before we start - # reinstalling the system, wiping its disks etc. - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': factory.getRandomString(), - 'architecture': make_usable_architecture(self), - 'mac_addresses': ['aa:bb:cc:dd:ee:ff'], - }) - self.assertEqual(httplib.OK, response.status_code) - system_id = json.loads(response.content)['system_id'] - self.assertEqual( - NODE_STATUS.DECLARED, - Node.objects.get(system_id=system_id).status) - - -class AnonymousEnlistmentAPITest(MAASServerTestCase): - # Enlistment tests specific to anonymous users. - - def test_POST_accept_not_allowed(self): - # An anonymous user is not allowed to accept an anonymously - # enlisted node. That would defeat the whole purpose of holding - # those nodes for approval. 
- node_id = factory.make_node(status=NODE_STATUS.DECLARED).system_id - response = self.client.post( - reverse('nodes_handler'), {'op': 'accept', 'nodes': [node_id]}) - self.assertEqual( - (httplib.UNAUTHORIZED, "You must be logged in to accept nodes."), - (response.status_code, response.content)) - - def test_POST_returns_limited_fields(self): - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'architecture': make_usable_architecture(self), - 'hostname': factory.getRandomString(), - 'mac_addresses': ['aa:bb:cc:dd:ee:ff', '22:bb:cc:dd:ee:ff'], - }) - parsed_result = json.loads(response.content) - self.assertItemsEqual( - [ - 'hostname', - 'owner', - 'system_id', - 'macaddress_set', - 'architecture', - 'status', - 'netboot', - 'power_type', - 'tag_names', - 'ip_addresses', - 'resource_uri', - 'cpu_count', - 'storage', - 'memory', - 'routers', - 'zone', - ], - list(parsed_result)) - - -class SimpleUserLoggedInEnlistmentAPITest(MAASServerTestCase): - """Enlistment tests from the perspective of regular, non-admin users.""" - - def test_POST_accept_not_allowed(self): - # An non-admin user is not allowed to accept an anonymously - # enlisted node. That would defeat the whole purpose of holding - # those nodes for approval. - self.client_log_in() - node_id = factory.make_node(status=NODE_STATUS.DECLARED).system_id - response = self.client.post( - reverse('nodes_handler'), {'op': 'accept', 'nodes': [node_id]}) - self.assertEqual( - (httplib.FORBIDDEN, - "You don't have the required permission to accept the " - "following node(s): %s." % node_id), - (response.status_code, response.content)) - - def test_POST_accept_all_does_not_accept_anything(self): - # It is not an error for a non-admin user to attempt to accept all - # anonymously enlisted nodes, but only those for which he/she has - # admin privs will be accepted, which currently equates to none of - # them. 
- self.client_log_in() - factory.make_node(status=NODE_STATUS.DECLARED), - factory.make_node(status=NODE_STATUS.DECLARED), - response = self.client.post( - reverse('nodes_handler'), {'op': 'accept_all'}) - self.assertEqual(httplib.OK, response.status_code) - nodes_returned = json.loads(response.content) - self.assertEqual([], nodes_returned) - - def test_POST_simple_user_can_set_power_type_and_parameters(self): - self.client_log_in() - new_power_address = factory.getRandomString() - response = self.client.post( - reverse('nodes_handler'), { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'architecture': make_usable_architecture(self), - 'power_type': 'ether_wake', - 'power_parameters': json.dumps( - {"power_address": new_power_address}), - 'mac_addresses': ['AA:BB:CC:DD:EE:FF'], - }) - - node = Node.objects.get( - system_id=json.loads(response.content)['system_id']) - self.assertEqual( - (httplib.OK, {"power_address": new_power_address}, - 'ether_wake'), - (response.status_code, node.power_parameters, - node.power_type)) - - def test_POST_returns_limited_fields(self): - self.client_log_in() - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': factory.getRandomString(), - 'architecture': make_usable_architecture(self), - 'mac_addresses': ['aa:bb:cc:dd:ee:ff', '22:bb:cc:dd:ee:ff'], - }) - parsed_result = json.loads(response.content) - self.assertItemsEqual( - [ - 'hostname', - 'owner', - 'system_id', - 'macaddress_set', - 'architecture', - 'status', - 'netboot', - 'power_type', - 'resource_uri', - 'tag_names', - 'ip_addresses', - 'cpu_count', - 'storage', - 'memory', - 'routers', - 'zone', - ], - list(parsed_result)) - - -class AdminLoggedInEnlistmentAPITest(MAASServerTestCase): - """Enlistment tests from the perspective of admin users.""" - - def test_POST_new_sets_power_type_if_admin(self): - self.client_log_in(as_admin=True) - response = self.client.post( - reverse('nodes_handler'), { - 'op': 
'new', - 'autodetect_nodegroup': '1', - 'architecture': make_usable_architecture(self), - 'power_type': 'ether_wake', - 'mac_addresses': ['00:11:22:33:44:55'], - }) - node = Node.objects.get( - system_id=json.loads(response.content)['system_id']) - self.assertEqual('ether_wake', node.power_type) - self.assertEqual('', node.power_parameters) - - def test_POST_new_sets_power_parameters_field(self): - # The api allows the setting of a Node's power_parameters field. - # Create a power_parameter valid for the selected power_type. - self.client_log_in(as_admin=True) - new_mac_address = factory.getRandomMACAddress() - response = self.client.post( - reverse('nodes_handler'), { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'architecture': make_usable_architecture(self), - 'power_type': 'ether_wake', - 'power_parameters_mac_address': new_mac_address, - 'mac_addresses': ['AA:BB:CC:DD:EE:FF'], - }) - - self.assertEqual(httplib.OK, response.status_code, response.content) - node = Node.objects.get( - system_id=json.loads(response.content)['system_id']) - self.assertEqual( - {'mac_address': new_mac_address}, - reload_object(node).power_parameters) - - def test_POST_updates_power_parameters_rejects_unknown_param(self): - self.client_log_in(as_admin=True) - hostname = factory.getRandomString() - response = self.client.post( - reverse('nodes_handler'), { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': hostname, - 'architecture': make_usable_architecture(self), - 'power_type': 'ether_wake', - 'power_parameters_unknown_param': factory.getRandomString(), - 'mac_addresses': [factory.getRandomMACAddress()], - }) - - self.assertEqual( - ( - httplib.BAD_REQUEST, - {'power_parameters': ["Unknown parameter(s): unknown_param."]} - ), - (response.status_code, json.loads(response.content))) - self.assertFalse(Node.objects.filter(hostname=hostname).exists()) - - def test_POST_new_sets_power_parameters_skip_check(self): - # The api allows to skip the validation step and set 
arbitrary - # power parameters. - self.client_log_in(as_admin=True) - param = factory.getRandomString() - response = self.client.post( - reverse('nodes_handler'), { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'architecture': make_usable_architecture(self), - 'power_type': 'ether_wake', - 'power_parameters_param': param, - 'power_parameters_skip_check': 'true', - 'mac_addresses': ['AA:BB:CC:DD:EE:FF'], - }) - - node = Node.objects.get( - system_id=json.loads(response.content)['system_id']) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual( - {'param': param}, - reload_object(node).power_parameters) - - def test_POST_admin_creates_node_in_commissioning_state(self): - # When an admin user enlists a node, it goes into the - # Commissioning state. - self.client_log_in(as_admin=True) - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': factory.getRandomString(), - 'architecture': make_usable_architecture(self), - 'power_type': 'ether_wake', - 'mac_addresses': ['aa:bb:cc:dd:ee:ff'], - }) - self.assertEqual(httplib.OK, response.status_code) - system_id = json.loads(response.content)['system_id'] - self.assertEqual( - NODE_STATUS.COMMISSIONING, - Node.objects.get(system_id=system_id).status) - - def test_POST_returns_limited_fields(self): - self.client_log_in(as_admin=True) - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': factory.getRandomString(), - 'architecture': make_usable_architecture(self), - 'power_type': 'ether_wake', - 'mac_addresses': ['aa:bb:cc:dd:ee:ff', '22:bb:cc:dd:ee:ff'], - }) - parsed_result = json.loads(response.content) - self.assertItemsEqual( - [ - 'hostname', - 'owner', - 'system_id', - 'macaddress_set', - 'architecture', - 'status', - 'netboot', - 'power_type', - 'resource_uri', - 'tag_names', - 'ip_addresses', - 'cpu_count', - 'storage', - 'memory', - 'routers', - 'zone', - ], - 
list(parsed_result)) - - def test_POST_accept_all(self): - # An admin user can accept all anonymously enlisted nodes. - self.client_log_in(as_admin=True) - nodes = [ - factory.make_node(status=NODE_STATUS.DECLARED), - factory.make_node(status=NODE_STATUS.DECLARED), - ] - response = self.client.post( - reverse('nodes_handler'), {'op': 'accept_all'}) - self.assertEqual(httplib.OK, response.status_code) - nodes_returned = json.loads(response.content) - self.assertSetEqual( - {node.system_id for node in nodes}, - {node["system_id"] for node in nodes_returned}) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_filestorage.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_filestorage.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_filestorage.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_filestorage.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,387 +0,0 @@ -# Copyright 2013-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for file-storage API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from base64 import b64decode -import httplib -import json -import os -import shutil - -from django.conf import settings -from django.core.urlresolvers import reverse -from fixtures import Fixture -from maasserver.models import FileStorage -from maasserver.testing.api import APITestCase -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase -from maastesting.utils import sample_binary_data -from testtools.matchers import ( - Contains, - Equals, - MatchesListwise, - ) - - -class MediaRootFixture(Fixture): - """Create and clear-down a `settings.MEDIA_ROOT` directory. - - The directory must not previously exist. 
- """ - - def setUp(self): - super(MediaRootFixture, self).setUp() - self.path = settings.MEDIA_ROOT - if os.path.exists(self.path): - raise AssertionError("See media/README") - self.addCleanup(shutil.rmtree, self.path, ignore_errors=True) - os.mkdir(self.path) - - -class FileStorageAPITestMixin: - - def setUp(self): - super(FileStorageAPITestMixin, self).setUp() - media_root = self.useFixture(MediaRootFixture()).path - self.tmpdir = os.path.join(media_root, "testing") - os.mkdir(self.tmpdir) - - def _create_API_params(self, op=None, filename=None, fileObj=None): - params = {} - if op is not None: - params["op"] = op - if filename is not None: - params["filename"] = filename - if fileObj is not None: - params["file"] = fileObj - return params - - def make_API_POST_request(self, op=None, filename=None, fileObj=None): - """Make an API POST request and return the response.""" - params = self._create_API_params(op, filename, fileObj) - return self.client.post(reverse('files_handler'), params) - - def make_API_GET_request(self, op=None, filename=None, fileObj=None): - """Make an API GET request and return the response.""" - params = self._create_API_params(op, filename, fileObj) - return self.client.get(reverse('files_handler'), params) - - -class AnonymousFileStorageAPITest(FileStorageAPITestMixin, MAASServerTestCase): - - def test_get_works_anonymously(self): - storage = factory.make_file_storage() - response = self.make_API_GET_request("get", storage.filename) - - self.assertEqual(storage.content, response.content) - self.assertEqual(httplib.OK, response.status_code) - - def test_get_fetches_the_most_recent_file(self): - filename = factory.make_name('file') - factory.make_file_storage(filename=filename, owner=factory.make_user()) - storage = factory.make_file_storage( - filename=filename, owner=factory.make_user()) - response = self.make_API_GET_request("get", filename) - - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(storage.content, 
response.content) - - def test_get_by_key_works_anonymously(self): - storage = factory.make_file_storage() - response = self.client.get( - reverse('files_handler'), {'key': storage.key, 'op': 'get_by_key'}) - - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(storage.content, response.content) - - def test_anon_resource_uri_allows_anonymous_access(self): - storage = factory.make_file_storage() - response = self.client.get(storage.anon_resource_uri) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(storage.content, response.content) - - def test_anon_cannot_list_files(self): - factory.make_file_storage() - response = self.make_API_GET_request("list") - # The 'list' operation is not available to anon users. - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - - def test_anon_cannot_get_file(self): - storage = factory.make_file_storage() - response = self.client.get( - reverse('file_handler', args=[storage.filename])) - self.assertEqual(httplib.UNAUTHORIZED, response.status_code) - - def test_anon_cannot_delete_file(self): - storage = factory.make_file_storage() - response = self.client.delete( - reverse('file_handler', args=[storage.filename])) - self.assertEqual(httplib.UNAUTHORIZED, response.status_code) - - -class FileStorageAPITest(FileStorageAPITestMixin, APITestCase): - - def test_files_handler_path(self): - self.assertEqual( - '/api/1.0/files/', reverse('files_handler')) - - def test_file_handler_path(self): - self.assertEqual( - '/api/1.0/files/filename/', - reverse('file_handler', args=['filename'])) - - def test_add_file_succeeds(self): - response = self.make_API_POST_request( - "add", factory.make_name('upload'), factory.make_file_upload()) - self.assertEqual(httplib.CREATED, response.status_code) - - def test_add_file_with_slashes_in_name_succeeds(self): - filename = "filename/with/slashes/in/it" - response = self.make_API_POST_request( - "add", filename, factory.make_file_upload()) - 
self.assertEqual(httplib.CREATED, response.status_code) - self.assertItemsEqual( - [filename], - FileStorage.objects.filter( - filename=filename).values_list('filename', flat=True)) - - def test_add_file_fails_with_no_filename(self): - response = self.make_API_POST_request( - "add", fileObj=factory.make_file_upload()) - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertIn('text/plain', response['Content-Type']) - self.assertEqual("Filename not supplied", response.content) - - def test_add_empty_file(self): - filename = "filename" - response = self.make_API_POST_request( - "add", filename=filename, - fileObj=factory.make_file_upload(content=b'')) - self.assertEqual(httplib.CREATED, response.status_code) - self.assertItemsEqual( - [filename], - FileStorage.objects.filter( - filename=filename).values_list('filename', flat=True)) - - def test_add_file_fails_with_no_file_attached(self): - response = self.make_API_POST_request("add", "foo") - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertIn('text/plain', response['Content-Type']) - self.assertEqual("File not supplied", response.content) - - def test_add_file_fails_with_too_many_files(self): - foo = factory.make_file_upload(name='foo') - foo2 = factory.make_file_upload(name='foo2') - - response = self.client.post( - reverse('files_handler'), - { - "op": "add", - "filename": "foo", - "file": foo, - "file2": foo2, - }) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertIn('text/plain', response['Content-Type']) - self.assertEqual("Exactly one file must be supplied", response.content) - - def test_add_file_can_overwrite_existing_file_of_same_name(self): - # Write file one. - response = self.make_API_POST_request( - "add", "foo", factory.make_file_upload(content=b"file one")) - self.assertEqual(httplib.CREATED, response.status_code) - - # Write file two with the same name but different contents. 
- response = self.make_API_POST_request( - "add", "foo", factory.make_file_upload(content=b"file two")) - self.assertEqual(httplib.CREATED, response.status_code) - - # Retrieve the file and check its contents are the new contents. - response = self.make_API_GET_request("get", "foo") - self.assertEqual(b"file two", response.content) - - def test_get_file_succeeds(self): - factory.make_file_storage( - filename="foofilers", content=b"give me rope") - response = self.make_API_GET_request("get", "foofilers") - - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(b"give me rope", response.content) - - def test_get_file_fails_with_no_filename(self): - response = self.make_API_GET_request("get") - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertIn('text/plain', response['Content-Type']) - self.assertEqual("No provided filename!", response.content) - - def test_get_file_fails_with_missing_file(self): - response = self.make_API_GET_request("get", filename="missingfilename") - - self.assertEqual(httplib.NOT_FOUND, response.status_code) - self.assertIn('text/plain', response['Content-Type']) - self.assertEqual("File not found", response.content) - - def test_list_files_returns_ordered_list(self): - filenames = ["myfiles/a", "myfiles/z", "myfiles/b"] - for filename in filenames: - factory.make_file_storage( - filename=filename, content=b"test content", - owner=self.logged_in_user) - response = self.make_API_GET_request("list") - self.assertEqual(httplib.OK, response.status_code) - parsed_results = json.loads(response.content) - filenames = [result['filename'] for result in parsed_results] - self.assertEqual(sorted(filenames), filenames) - - def test_list_files_filters_by_owner(self): - factory.make_file_storage(owner=factory.make_user()) - response = self.make_API_GET_request("list") - self.assertEqual(httplib.OK, response.status_code) - parsed_results = json.loads(response.content) - self.assertEqual([], parsed_results) - - def 
test_list_files_lists_files_with_prefix(self): - filenames_with_prefix = ["prefix-file1", "prefix-file2"] - filenames = filenames_with_prefix + ["otherfile", "otherfile2"] - for filename in filenames: - factory.make_file_storage( - filename=filename, content=b"test content", - owner=self.logged_in_user) - response = self.client.get( - reverse('files_handler'), {"op": "list", "prefix": "prefix-"}) - self.assertEqual(httplib.OK, response.status_code) - parsed_results = json.loads(response.content) - filenames = [result['filename'] for result in parsed_results] - self.assertItemsEqual(filenames_with_prefix, filenames) - - def test_list_files_does_not_include_file_content(self): - factory.make_file_storage( - filename="filename", content=b"test content", - owner=self.logged_in_user) - response = self.make_API_GET_request("list") - parsed_results = json.loads(response.content) - self.assertNotIn('content', parsed_results[0].keys()) - - def test_files_resource_uri_supports_slashes_in_filenames(self): - filename = "a/filename/with/slashes/in/it/" - factory.make_file_storage( - filename=filename, content=b"test content", - owner=self.logged_in_user) - response = self.make_API_GET_request("list") - parsed_results = json.loads(response.content) - resource_uri = parsed_results[0]['resource_uri'] - expected_uri = reverse('file_handler', args=[filename]) - self.assertEqual(expected_uri, resource_uri) - - def test_api_supports_slashes_in_filenames_roundtrip_test(self): - # Do a roundtrip (upload a file then get it) for a file with a - # name that contains slashes. - filename = "filename/with/slashes/in/it" - self.make_API_POST_request( - "add", filename, factory.make_file_upload()) - file_url = reverse('file_handler', args=[filename]) - # The file url contains the filename without any kind of - # escaping. 
- self.assertIn(filename, file_url) - response = self.client.get(file_url) - parsed_result = json.loads(response.content) - self.assertEqual(filename, parsed_result['filename']) - - def test_get_file_returns_file_object_with_content_base64_encoded(self): - filename = factory.make_name("file") - content = sample_binary_data - factory.make_file_storage( - filename=filename, content=content, owner=self.logged_in_user) - response = self.client.get( - reverse('file_handler', args=[filename])) - parsed_result = json.loads(response.content) - self.assertEqual( - (filename, content), - ( - parsed_result['filename'], - b64decode(parsed_result['content']) - )) - - def test_get_file_returns_file_object_with_resource_uri(self): - filename = factory.make_name("file") - content = sample_binary_data - factory.make_file_storage( - filename=filename, content=content, owner=self.logged_in_user) - response = self.client.get( - reverse('file_handler', args=[filename])) - parsed_result = json.loads(response.content) - self.assertEqual( - reverse('file_handler', args=[filename]), - parsed_result['resource_uri']) - - def test_get_file_returns_owned_file(self): - # If both an owned file and a non-owned file are present (with the - # same name), the owned file is returned. 
- filename = factory.make_name("file") - factory.make_file_storage(filename=filename, owner=None) - content = sample_binary_data - storage = factory.make_file_storage( - filename=filename, content=content, owner=self.logged_in_user) - response = self.client.get( - reverse('file_handler', args=[filename])) - parsed_result = json.loads(response.content) - self.assertEqual( - (filename, storage.anon_resource_uri, content), - ( - parsed_result['filename'], - parsed_result['anon_resource_uri'], - b64decode(parsed_result['content']) - )) - - def test_get_file_returning_404_file_includes_header(self): - # In order to fix bug 1123986 we need to distinguish between - # a 404 returned when the file is not present and a 404 returned - # when the API endpoint is not present. We do this by setting - # a header: "Workaround: bug1123986". - response = self.client.get( - reverse('file_handler', args=[factory.make_name("file")])) - self.assertThat( - ( - response.status_code, - response.items(), - ), - MatchesListwise( - ( - Equals(httplib.NOT_FOUND), - Contains(('Workaround', 'bug1123986')), - )), - response) - - def test_delete_filters_by_owner(self): - storage = factory.make_file_storage(owner=factory.make_user()) - response = self.client.delete( - reverse('file_handler', args=[storage.filename])) - self.assertEqual(httplib.NOT_FOUND, response.status_code) - files = FileStorage.objects.filter(filename=storage.filename) - self.assertEqual([storage], list(files)) - - def test_delete_file_deletes_file(self): - filename = factory.make_name('file') - factory.make_file_storage( - filename=filename, content=b"test content", - owner=self.logged_in_user) - response = self.client.delete( - reverse('file_handler', args=[filename])) - self.assertEqual(httplib.NO_CONTENT, response.status_code) - files = FileStorage.objects.filter(filename=filename) - self.assertEqual([], list(files)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_macaddress.py 
maas-1.7.6+bzr3376/src/maasserver/tests/test_api_macaddress.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_macaddress.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_macaddress.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,195 +0,0 @@ -# Copyright 2013-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for MAC-address management in the API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -import json - -from django.core.urlresolvers import reverse -from maasserver.enum import NODE_STATUS -from maasserver.models import MACAddress -from maasserver.testing import reload_object -from maasserver.testing.api import APITestCase -from maasserver.testing.factory import factory - - -class MACAddressAPITest(APITestCase): - - def test_macs_handler_path(self): - self.assertEqual( - '/api/1.0/nodes/node-id/macs/', - reverse('node_macs_handler', args=['node-id'])) - - def test_mac_handler_path(self): - self.assertEqual( - '/api/1.0/nodes/node-id/macs/mac/', - reverse('node_mac_handler', args=['node-id', 'mac'])) - - def createNodeWithMacs(self, owner=None): - node = factory.make_node(owner=owner) - mac1 = node.add_mac_address('aa:bb:cc:dd:ee:ff') - mac2 = node.add_mac_address('22:bb:cc:dd:aa:ff') - return node, mac1, mac2 - - def test_macs_GET(self): - # The api allows for fetching the list of the MAC address for a node. 
- node, mac1, mac2 = self.createNodeWithMacs() - response = self.client.get( - reverse('node_macs_handler', args=[node.system_id])) - parsed_result = json.loads(response.content) - - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(2, len(parsed_result)) - self.assertEqual(mac1.mac_address, parsed_result[0]['mac_address']) - self.assertEqual(mac2.mac_address, parsed_result[1]['mac_address']) - - def test_macs_GET_not_found(self): - # When fetching MAC addresses, the api returns a 'Not Found' (404) - # error if no node is found. - url = reverse('node_macs_handler', args=['invalid-id']) - response = self.client.get(url) - - self.assertEqual(httplib.NOT_FOUND, response.status_code) - - def test_macs_GET_node_not_found(self): - # When fetching a MAC address, the api returns a 'Not Found' (404) - # error if the MAC address does not exist. - node = factory.make_node() - response = self.client.get( - reverse( - 'node_mac_handler', - args=[node.system_id, '00-aa-22-cc-44-dd'])) - - self.assertEqual(httplib.NOT_FOUND, response.status_code) - - def test_macs_GET_node_bad_request(self): - # When fetching a MAC address, the api returns a 'Bad Request' (400) - # error if the MAC address is not valid. - node = factory.make_node() - url = reverse('node_mac_handler', args=[node.system_id, 'invalid-mac']) - response = self.client.get(url) - - self.assertEqual(400, response.status_code) - - def test_macs_POST_add_mac(self): - # The api allows to add a MAC address to an existing node. 
- node = factory.make_node(owner=self.logged_in_user) - nb_macs = MACAddress.objects.filter(node=node).count() - response = self.client.post( - reverse('node_macs_handler', args=[node.system_id]), - {'mac_address': '01:BB:CC:DD:EE:FF'}) - parsed_result = json.loads(response.content) - - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual('01:BB:CC:DD:EE:FF', parsed_result['mac_address']) - self.assertEqual( - nb_macs + 1, - MACAddress.objects.filter(node=node).count()) - - def test_macs_POST_add_mac_without_edit_perm(self): - # Adding a MAC address to a node requires the NODE_PERMISSION.EDIT - # permission. - node = factory.make_node() - response = self.client.post( - reverse('node_macs_handler', args=[node.system_id]), - {'mac_address': '01:BB:CC:DD:EE:FF'}) - - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_macs_POST_add_mac_invalid(self): - # A 'Bad Request' response is returned if one tries to add an invalid - # MAC address to a node. - node = self.createNodeWithMacs(self.logged_in_user)[0] - response = self.client.post( - reverse('node_macs_handler', args=[node.system_id]), - {'mac_address': 'invalid-mac'}) - parsed_result = json.loads(response.content) - - self.assertEqual(400, response.status_code) - self.assertEqual(['mac_address'], list(parsed_result)) - self.assertEqual( - ["Enter a valid MAC address (e.g. AA:BB:CC:DD:EE:FF)."], - parsed_result['mac_address']) - - def test_macs_DELETE_mac(self): - # The api allows to delete a MAC address. 
- node, mac1, mac2 = self.createNodeWithMacs(self.logged_in_user) - nb_macs = node.macaddress_set.count() - response = self.client.delete( - reverse( - 'node_mac_handler', - args=[node.system_id, mac1.mac_address])) - - self.assertEqual(httplib.NO_CONTENT, response.status_code) - self.assertEqual( - nb_macs - 1, - node.macaddress_set.count()) - - def test_macs_DELETE_disconnects_from_network(self): - network = factory.make_network() - node = factory.make_node(owner=self.logged_in_user) - mac = factory.make_mac_address(node=node, networks=[network]) - response = self.client.delete( - reverse( - 'node_mac_handler', args=[node.system_id, mac.mac_address])) - self.assertEqual(httplib.NO_CONTENT, response.status_code) - self.assertIsNotNone(reload_object(network)) - - def test_macs_DELETE_mac_forbidden(self): - # When deleting a MAC address, the api returns a 'Forbidden' (403) - # error if the node is not visible to the logged-in user. - node, mac1, _ = self.createNodeWithMacs() - factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) - response = self.client.delete( - reverse( - 'node_mac_handler', - args=[node.system_id, mac1.mac_address])) - - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_macs_DELETE_not_found(self): - # When deleting a MAC address, the api returns a 'Not Found' (404) - # error if no existing MAC address is found. - node = factory.make_node(owner=self.logged_in_user) - response = self.client.delete( - reverse( - 'node_mac_handler', - args=[node.system_id, '00-aa-22-cc-44-dd'])) - - self.assertEqual(httplib.NOT_FOUND, response.status_code) - - def test_macs_DELETE_forbidden(self): - # When deleting a MAC address, the api returns a 'Forbidden' - # (403) error if the user does not have the 'edit' permission on the - # node. 
- node = factory.make_node(owner=self.logged_in_user) - response = self.client.delete( - reverse( - 'node_mac_handler', - args=[node.system_id, '00-aa-22-cc-44-dd'])) - - self.assertEqual(httplib.NOT_FOUND, response.status_code) - - def test_macs_DELETE_bad_request(self): - # When deleting a MAC address, the api returns a 'Bad Request' (400) - # error if the provided MAC address is not valid. - node = factory.make_node() - response = self.client.delete( - reverse( - 'node_mac_handler', - args=[node.system_id, 'invalid-mac'])) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_mechanism.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_mechanism.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_mechanism.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_mechanism.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,66 +0,0 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Testing of the API infrastructure, as opposed to code that uses it to -export API methods. -""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from maasserver.api import operation -from maasserver.testing.factory import factory -from maastesting.testcase import MAASTestCase - - -class TestOperationDecorator(MAASTestCase): - """Testing for the `operation` decorator.""" - - def test_valid_decoration(self): - value = "value" + factory.getRandomString() - decorate = operation(idempotent=False) - decorated = decorate(lambda: value) - self.assertEqual(value, decorated()) - - def test_can_passexported_as(self): - # Test that passing the optional "exported_as" works as expected. 
- randomexported_name = factory.make_name("exportedas", sep='') - decorate = operation( - idempotent=False, exported_as=randomexported_name) - decorated = decorate(lambda: None) - self.assertEqual(randomexported_name, decorated.export[1]) - - def testexported_as_is_optional(self): - # If exported_as is not passed then we expect the function to be - # exported in the API using the actual function name itself. - - def exported_function(): - pass - - decorate = operation(idempotent=True) - decorated = decorate(exported_function) - self.assertEqual("exported_function", decorated.export[1]) - - def test_idempotent_uses_GET(self): - # If a function is declared as idempotent the export signature - # includes the HTTP GET method. - func = lambda: None - self.assertEqual( - ("GET", func.__name__), - operation(idempotent=True)(func).export) - - def test_non_idempotent_uses_POST(self): - # If a function is declared as not idempotent the export signature - # includes the HTTP POST method. - func = lambda: None - self.assertEqual( - ("POST", func.__name__), - operation(idempotent=False)(func).export) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_network.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_network.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_network.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_network.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,395 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for the `Network` API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -import json - -from django.core.urlresolvers import reverse -from maasserver.enum import NODE_STATUS -from maasserver.testing import reload_object -from maasserver.testing.api import APITestCase -from maasserver.testing.factory import factory - - -class TestNetwork(APITestCase): - def get_url(self, name): - """Return the URL for the network of the given name.""" - return reverse('network_handler', args=[name]) - - def test_handler_path(self): - name = factory.make_name('net') - self.assertEqual('/api/1.0/networks/%s/' % name, self.get_url(name)) - - def test_POST_is_prohibited(self): - self.become_admin() - network = factory.make_network() - response = self.client.post( - self.get_url(network.name), - {'description': "New description"}) - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - - def test_GET_returns_network(self): - network = factory.make_network() - - response = self.client.get(self.get_url(network.name)) - self.assertEqual(httplib.OK, response.status_code) - - parsed_result = json.loads(response.content) - self.assertEqual( - ( - network.name, - network.ip, - network.netmask, - network.vlan_tag, - network.description, - ), - ( - parsed_result['name'], - parsed_result['ip'], - parsed_result['netmask'], - parsed_result['vlan_tag'], - parsed_result['description'], - )) - - def test_GET_returns_404_for_unknown_network(self): - self.assertEqual( - httplib.NOT_FOUND, - self.client.get(self.get_url('nonesuch')).status_code) - - def test_PUT_updates_network(self): - self.become_admin() - network = factory.make_network() - new_net = factory.getRandomNetwork() - new_values = { - 'name': factory.make_name('new'), - 'ip': '%s' % new_net.cidr.ip, - 'netmask': '%s' % new_net.netmask, - 'vlan_tag': factory.make_vlan_tag(), - 'description': "Changed 
description", - } - - response = self.client_put(self.get_url(network.name), new_values) - self.assertEqual(httplib.OK, response.status_code) - - network = reload_object(network) - self.assertAttributes(network, new_values) - - def test_PUT_requires_admin(self): - description = "Original description" - network = factory.make_network(description=description) - response = self.client_put( - self.get_url(network.name), {'description': "Changed description"}) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - self.assertEqual(description, reload_object(network).description) - - def test_PUT_returns_404_for_unknown_network(self): - self.become_admin() - self.assertEqual( - httplib.NOT_FOUND, - self.client_put(self.get_url('nonesuch')).status_code) - - def test_DELETE_deletes_network(self): - self.become_admin() - network = factory.make_network() - response = self.client.delete(self.get_url(network.name)) - self.assertEqual(httplib.NO_CONTENT, response.status_code) - self.assertIsNone(reload_object(network)) - - def test_DELETE_requires_admin(self): - network = factory.make_network() - response = self.client.delete(self.get_url(network.name)) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - self.assertIsNotNone(reload_object(network)) - - def test_DELETE_is_idempotent(self): - name = factory.make_name('no-net') - self.become_admin() - response1 = self.client.delete(self.get_url(name)) - response2 = self.client.delete(self.get_url(name)) - self.assertEqual(response1.status_code, response2.status_code) - - def test_DELETE_works_with_MACs_attached(self): - self.become_admin() - network = factory.make_network() - mac = factory.make_mac_address(networks=[network]) - response = self.client.delete(self.get_url(network.name)) - self.assertEqual(httplib.NO_CONTENT, response.status_code) - self.assertIsNone(reload_object(network)) - mac = reload_object(mac) - self.assertEqual([], list(mac.networks.all())) - - def 
test_POST_connect_macs_connects_macs_to_network(self): - self.become_admin() - network = factory.make_network() - macs = [factory.make_mac_address(networks=[network]) for _ in range(2)] - response = self.client.post( - self.get_url(network.name), - { - 'op': 'connect_macs', - 'macs': [mac.mac_address for mac in macs], - }) - self.assertEqual(httplib.OK, response.status_code, response.content) - self.assertEqual(set(macs), set(network.macaddress_set.all())) - - def test_POST_connect_macs_accepts_empty_macs_list(self): - self.become_admin() - network = factory.make_network() - response = self.client.post( - self.get_url(network.name), - { - 'op': 'connect_macs', - 'macs': [], - }) - self.assertEqual(httplib.OK, response.status_code, response.content) - self.assertEqual([], list(network.macaddress_set.all())) - - def test_POST_connect_macs_leaves_other_networks_unchanged(self): - self.become_admin() - network = factory.make_network() - other_network = factory.make_network() - mac = factory.make_mac_address(networks=[other_network]) - response = self.client.post( - self.get_url(network.name), - { - 'op': 'connect_macs', - 'macs': [mac.mac_address], - }) - self.assertEqual(httplib.OK, response.status_code, response.content) - self.assertEqual({network, other_network}, set(mac.networks.all())) - - def test_POST_connect_macs_leaves_other_MACs_unchanged(self): - self.become_admin() - network = factory.make_network() - mac = factory.make_mac_address(networks=[]) - other_mac = factory.make_mac_address(networks=[network]) - response = self.client.post( - self.get_url(network.name), - { - 'op': 'connect_macs', - 'macs': [mac.mac_address], - }) - self.assertEqual(httplib.OK, response.status_code, response.content) - self.assertEqual({mac, other_mac}, set(network.macaddress_set.all())) - - def test_POST_connect_macs_ignores_MACs_already_on_network(self): - self.become_admin() - network = factory.make_network() - mac = factory.make_mac_address(networks=[network]) - response = 
self.client.post( - self.get_url(network.name), - { - 'op': 'connect_macs', - 'macs': [mac.mac_address], - }) - self.assertEqual(httplib.OK, response.status_code, response.content) - self.assertEqual({mac}, set(network.macaddress_set.all())) - - def test_POST_connect_macs_requires_admin(self): - network = factory.make_network() - response = self.client.post( - self.get_url(network.name), - { - 'op': 'connect_macs', - 'macs': [], - }) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_POST_connect_macs_fails_on_unknown_MAC(self): - self.become_admin() - network = factory.make_network() - nonexistent_mac = factory.make_MAC() - response = self.client.post( - self.get_url(network.name), - { - 'op': 'connect_macs', - 'macs': [nonexistent_mac], - }) - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertEqual( - {'macs': ["Unknown MAC address(es): %s." % nonexistent_mac]}, - json.loads(response.content)) - - def test_POST_disconnect_macs_removes_MACs_from_network(self): - self.become_admin() - network = factory.make_network() - mac = factory.make_mac_address(networks=[network]) - response = self.client.post( - self.get_url(network.name), - { - 'op': 'disconnect_macs', - 'macs': [mac.mac_address], - }) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual([], list(mac.networks.all())) - - def test_POST_disconnect_macs_requires_admin(self): - response = self.client.post( - self.get_url(factory.make_network().name), - { - 'op': 'disconnect_macs', - 'macs': [factory.make_mac_address().mac_address], - }) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_POST_disconnect_macs_accepts_empty_MACs_list(self): - self.become_admin() - response = self.client.post( - self.get_url(factory.make_network().name), - { - 'op': 'disconnect_macs', - 'macs': [], - }) - self.assertEqual(httplib.OK, response.status_code) - - def test_POST_disconnect_macs_is_idempotent(self): - self.become_admin() - response = 
self.client.post( - self.get_url(factory.make_network().name), - { - 'op': 'disconnect_macs', - 'macs': [factory.make_mac_address().mac_address], - }) - self.assertEqual(httplib.OK, response.status_code) - - def test_POST_disconnect_macs_leaves_other_MACs_unchanged(self): - self.become_admin() - network = factory.make_network() - other_mac = factory.make_mac_address(networks=[network]) - response = self.client.post( - self.get_url(network.name), - { - 'op': 'disconnect_macs', - 'macs': [ - factory.make_mac_address(networks=[network]).mac_address - ], - }) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual([network], list(other_mac.networks.all())) - - def test_POST_disconnect_macs_leaves_other_networks_unchanged(self): - self.become_admin() - network = factory.make_network() - other_network = factory.make_network() - mac = factory.make_mac_address(networks=[network, other_network]) - response = self.client.post( - self.get_url(network.name), - { - 'op': 'disconnect_macs', - 'macs': [mac.mac_address], - }) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual([other_network], list(mac.networks.all())) - - def test_POST_disconnect_macs_fails_on_unknown_mac(self): - self.become_admin() - nonexistent_mac = factory.make_MAC() - response = self.client.post( - self.get_url(factory.make_network().name), - { - 'op': 'disconnect_macs', - 'macs': [nonexistent_mac], - }) - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertEqual( - {'macs': ["Unknown MAC address(es): %s." % nonexistent_mac]}, - json.loads(response.content)) - - -class TestListConnectedMACs(APITestCase): - """Tests for /api/1.0/network/s/?op=list_connected_macs.""" - - def make_mac(self, networks=None, owner=None, node=None): - """Create a MAC address. - - :param networks: Optional list of `Network` objects to connect the - MAC to. If omitted, the MAC will not be connected to any networks. 
- :param node: Optional node that will have this MAC - address. If omitted, one will be created. - :param owner: Optional owner for the node that will have this MAC - address. If omitted, one will be created. The node will be in - the "allocated" state. This parameter is ignored if a node is - provided. - """ - if networks is None: - networks = [] - if owner is None: - owner = factory.make_user() - if node is None: - node = factory.make_node(status=NODE_STATUS.ALLOCATED, owner=owner) - return factory.make_mac_address(networks=networks, node=node) - - def request_connected_macs(self, network): - """Request and return the MAC addresses attached to `network`.""" - url = reverse('network_handler', args=[network.name]) - response = self.client.get(url, {'op': 'list_connected_macs'}) - self.assertEqual(httplib.OK, response.status_code) - return json.loads(response.content) - - def extract_macs(self, returned_macs): - """Extract the textual MAC addresses from an API response.""" - return [item['mac_address'] for item in returned_macs] - - def test_returns_connected_macs(self): - network = factory.make_network() - macs = [ - self.make_mac(networks=[network], owner=self.logged_in_user) - for _ in range(3) - ] - self.assertEqual( - {mac.mac_address for mac in macs}, - set(self.extract_macs(self.request_connected_macs(network)))) - - def test_ignores_unconnected_macs(self): - self.make_mac( - networks=[factory.make_network()], owner=self.logged_in_user) - self.make_mac(networks=[], owner=self.logged_in_user) - self.assertEqual( - [], - self.request_connected_macs(factory.make_network())) - - def test_includes_MACs_for_nodes_visible_to_user(self): - network = factory.make_network() - mac = self.make_mac(networks=[network], owner=self.logged_in_user) - self.assertEqual( - [mac.mac_address], - self.extract_macs(self.request_connected_macs(network))) - - def test_excludes_MACs_for_nodes_not_visible_to_user(self): - network = factory.make_network() - 
self.make_mac(networks=[network]) - self.assertEqual([], self.request_connected_macs(network)) - - def test_returns_sorted_MACs(self): - network = factory.make_network() - macs = [ - self.make_mac( - networks=[network], node=factory.make_node(sortable_name=True), - owner=self.logged_in_user) - for _ in range(4) - ] - # Create MACs connected to the same node. - macs = macs + [ - self.make_mac( - networks=[network], owner=self.logged_in_user, - node=macs[0].node) - for _ in range(3) - ] - sorted_macs = sorted( - macs, - key=lambda x: (x.node.hostname.lower(), x.mac_address.get_raw())) - self.assertEqual( - [mac.mac_address.get_raw() for mac in sorted_macs], - self.extract_macs(self.request_connected_macs(network))) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_networks.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_networks.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_networks.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_networks.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,142 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for networks API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -import json - -from django.core.urlresolvers import reverse -from maasserver.models import Network -from maasserver.testing.api import APITestCase -from maasserver.testing.factory import factory -from maasserver.utils.orm import get_one - - -class TestNetworksAPI(APITestCase): - - def test_handler_path(self): - self.assertEqual('/api/1.0/networks/', reverse('networks_handler')) - - def test_POST_creates_network(self): - self.become_admin() - net = factory.getRandomNetwork() - params = { - 'name': factory.make_name('net'), - 'ip': '%s' % net.cidr.ip, - 'netmask': '%s' % net.netmask, - 'vlan_tag': factory.make_vlan_tag(), - 'description': factory.getRandomString(), - } - response = self.client.post(reverse('networks_handler'), params) - self.assertEqual(httplib.OK, response.status_code) - network = Network.objects.get(name=params['name']) - self.assertAttributes(network, params) - - def test_POST_requires_admin(self): - name = factory.make_name('no-net') - response = self.client.post( - reverse('networks_handler'), - {'name': name}) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - self.assertIsNone(get_one(Network.objects.filter(name=name))) - - def test_GET_returns_networks(self): - original_network = factory.make_network() - - response = self.client.get(reverse('networks_handler')) - self.assertEqual(httplib.OK, response.status_code, response.content) - - parsed_result = json.loads(response.content) - self.assertEqual(1, len(parsed_result)) - [returned_network] = parsed_result - fields = {'name', 'ip', 'netmask', 'vlan_tag', 'description'} - self.assertEqual( - fields.union({'resource_uri'}), - set(returned_network.keys())) - expected_values = { - field: getattr(original_network, field) - for field in fields - if field != 'resource_uri' - } - 
expected_values['resource_uri'] = reverse( - 'network_handler', args=[original_network.name]) - self.assertEqual(expected_values, returned_network) - - def test_GET_returns_empty_if_no_networks(self): - response = self.client.get(reverse('networks_handler')) - self.assertEqual(httplib.OK, response.status_code, response.content) - self.assertEqual([], json.loads(response.content)) - - def test_GET_sorts_by_name(self): - networks = factory.make_networks(3, sortable_name=True) - response = self.client.get(reverse('networks_handler')) - self.assertEqual(httplib.OK, response.status_code, response.content) - - self.assertEqual( - sorted(network.name for network in networks), - [network['name'] for network in json.loads(response.content)]) - - def test_GET_filters_by_node(self): - networks = factory.make_networks(5) - mac = factory.make_mac_address(networks=networks[1:3]) - node = mac.node - response = self.client.get( - reverse('networks_handler'), - {'node': [node.system_id]}) - self.assertEqual(httplib.OK, response.status_code, response.content) - - self.assertEqual( - {network.name for network in mac.networks.all()}, - {network['name'] for network in json.loads(response.content)}) - - def test_GET_combines_node_filters_as_intersection_of_networks(self): - networks = factory.make_networks(5) - mac1 = factory.make_mac_address(networks=networks[1:3]) - mac2 = factory.make_mac_address(networks=networks[2:4]) - node1 = mac1.node - # Attach another MAC address to node1. 
- factory.make_mac_address(networks=networks[1:2], node=node1) - node2 = mac2.node - - response = self.client.get( - reverse('networks_handler'), - {'node': [node1.system_id, node2.system_id]}) - self.assertEqual(httplib.OK, response.status_code, response.content) - - self.assertEqual( - {networks[2].name}, - {network['name'] for network in json.loads(response.content)}) - - def test_GET_fails_if_filtering_by_nonexistent_node(self): - bad_system_id = factory.make_name('no_node') - response = self.client.get( - reverse('networks_handler'), - {'node': [bad_system_id]}) - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertEqual( - {'node': ["Unknown node(s): %s." % bad_system_id]}, - json.loads(response.content)) - - def test_GET_ignores_duplicates(self): - factory.make_network() - mac = factory.make_mac_address(networks=[factory.make_network()]) - node = mac.node - response = self.client.get( - reverse('networks_handler'), - {'node': [node.system_id, node.system_id]}) - self.assertEqual(httplib.OK, response.status_code, response.content) - self.assertEqual( - {network.name for network in mac.networks.all()}, - {network['name'] for network in json.loads(response.content)}) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_nodegroup.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_nodegroup.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_nodegroup.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_nodegroup.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,802 +0,0 @@ -# Copyright 2013-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for the NodeGroups API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -import json -from textwrap import dedent - -from apiclient.maas_client import MAASClient -import bson -from django.conf import settings -from django.contrib.auth.models import AnonymousUser -from django.core.urlresolvers import reverse -from fixtures import EnvironmentVariableFixture -from maasserver.enum import ( - NODEGROUP_STATUS, - NODEGROUP_STATUS_CHOICES, - ) -from maasserver.models import ( - Config, - DHCPLease, - DownloadProgress, - NodeGroup, - nodegroup as nodegroup_module, - ) -from maasserver.refresh_worker import refresh_worker -from maasserver.testing import ( - reload_object, - reload_objects, - ) -from maasserver.testing.api import ( - APITestCase, - explain_unexpected_response, - log_in_as_normal_user, - make_worker_client, - MultipleUsersScenarios, - ) -from maasserver.testing.factory import factory -from maasserver.testing.oauthclient import OAuthAuthenticatedClient -from maasserver.testing.testcase import MAASServerTestCase -from maastesting.celery import CeleryFixture -from maastesting.fakemethod import FakeMethod -from maastesting.matchers import MockCalledOnceWith -from metadataserver.fields import Bin -from metadataserver.models import ( - commissioningscript, - NodeCommissionResult, - ) -from mock import ( - ANY, - Mock, - ) -from provisioningserver import tasks -from provisioningserver.auth import get_recorded_nodegroup_uuid -from provisioningserver.dhcp.leases import send_leases -from provisioningserver.omshell import Omshell -from testresources import FixtureResource -from testtools.matchers import ( - AllMatch, - Equals, - ) - - -class TestNodeGroupsAPI(MultipleUsersScenarios, - MAASServerTestCase): - scenarios = [ - ('anon', dict(userfactory=lambda: AnonymousUser())), - ('user', dict(userfactory=factory.make_user)), - ('admin', 
dict(userfactory=factory.make_admin)), - ] - - resources = ( - ('celery', FixtureResource(CeleryFixture())), - ) - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/nodegroups/', reverse('nodegroups_handler')) - - def test_reverse_points_to_nodegroups_api(self): - self.assertEqual( - reverse('nodegroups_handler'), reverse('nodegroups_handler')) - - def test_nodegroups_index_lists_nodegroups(self): - # The nodegroups index lists node groups for the MAAS. - nodegroup = factory.make_node_group() - response = self.client.get( - reverse('nodegroups_handler'), {'op': 'list'}) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual( - [{ - 'uuid': nodegroup.uuid, - 'status': nodegroup.status, - 'name': nodegroup.name, - 'cluster_name': nodegroup.cluster_name, - }], - json.loads(response.content)) - - -class TestAnonNodeGroupsAPI(MAASServerTestCase): - - resources = ( - ('celery', FixtureResource(CeleryFixture())), - ) - - def test_refresh_calls_refresh_worker(self): - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) - response = self.client.post( - reverse('nodegroups_handler'), {'op': 'refresh_workers'}) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(nodegroup.uuid, get_recorded_nodegroup_uuid()) - - def test_refresh_does_not_return_secrets(self): - # The response from "refresh" contains only an innocuous - # confirmation. Anyone can call this method, so it mustn't - # reveal anything sensitive. 
- response = self.client.post( - reverse('nodegroups_handler'), {'op': 'refresh_workers'}) - self.assertEqual( - (httplib.OK, "Sending worker refresh."), - (response.status_code, response.content)) - - -class TestNodeGroupAPI(APITestCase): - - resources = ( - ('celery', FixtureResource(CeleryFixture())), - ) - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/nodegroups/name/', - reverse('nodegroup_handler', args=['name'])) - - def test_GET_returns_node_group(self): - nodegroup = factory.make_node_group() - response = self.client.get( - reverse('nodegroup_handler', args=[nodegroup.uuid])) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual( - nodegroup.uuid, json.loads(response.content).get('uuid')) - - def test_GET_returns_404_for_unknown_node_group(self): - response = self.client.get( - reverse( - 'nodegroup_handler', - args=[factory.make_name('nodegroup')])) - self.assertEqual(httplib.NOT_FOUND, response.status_code) - - def test_PUT_reserved_to_admin_users(self): - nodegroup = factory.make_node_group() - response = self.client_put( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - {'name': factory.make_name("new-name")}) - - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_PUT_updates_nodegroup(self): - # The api allows the updating of a NodeGroup. 
- nodegroup = factory.make_node_group() - self.become_admin() - new_name = factory.make_name("new-name") - new_cluster_name = factory.make_name("new-cluster-name") - new_status = factory.getRandomChoice( - NODEGROUP_STATUS_CHOICES, but_not=[nodegroup.status]) - response = self.client_put( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - { - 'name': new_name, - 'cluster_name': new_cluster_name, - 'status': new_status, - }) - - self.assertEqual(httplib.OK, response.status_code, response.content) - nodegroup = reload_object(nodegroup) - self.assertEqual( - (new_name, new_cluster_name, new_status), - (nodegroup.name, nodegroup.cluster_name, nodegroup.status)) - - def test_PUT_updates_nodegroup_validates_data(self): - nodegroup, _ = factory.make_unrenamable_nodegroup_with_node() - self.become_admin() - new_name = factory.make_name("new-name") - response = self.client_put( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - {'name': new_name}) - - parsed_result = json.loads(response.content) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertIn( - "Can't rename DNS zone", - parsed_result['name'][0]) - - def test_update_leases_processes_empty_leases_dict(self): - nodegroup = factory.make_node_group() - factory.make_dhcp_lease(nodegroup=nodegroup) - client = make_worker_client(nodegroup) - response = client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - { - 'op': 'update_leases', - 'leases': json.dumps({}), - }) - self.assertEqual( - (httplib.OK, "Leases updated."), - (response.status_code, response.content)) - self.assertItemsEqual( - [], DHCPLease.objects.filter(nodegroup=nodegroup)) - - def test_update_leases_stores_leases(self): - self.patch(Omshell, 'create') - nodegroup = factory.make_node_group() - lease = factory.make_random_leases() - client = make_worker_client(nodegroup) - response = client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - { - 'op': 'update_leases', - 'leases': 
json.dumps(lease), - }) - self.assertEqual( - (httplib.OK, "Leases updated."), - (response.status_code, response.content)) - self.assertItemsEqual( - lease.keys(), [ - dhcplease.ip - for dhcplease in DHCPLease.objects.filter(nodegroup=nodegroup) - ]) - - def test_update_leases_adds_new_leases_on_worker(self): - nodegroup = factory.make_node_group() - client = make_worker_client(nodegroup) - self.patch(Omshell, 'create', FakeMethod()) - new_leases = factory.make_random_leases() - response = client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - { - 'op': 'update_leases', - 'leases': json.dumps(new_leases), - }) - self.assertEqual( - (httplib.OK, "Leases updated."), - (response.status_code, response.content)) - self.assertEqual( - [(new_leases.keys()[0], new_leases.values()[0])], - Omshell.create.extract_args()) - - def test_update_leases_does_not_add_old_leases(self): - self.patch(Omshell, 'create') - nodegroup = factory.make_node_group() - client = make_worker_client(nodegroup) - self.patch(tasks, 'add_new_dhcp_host_map', FakeMethod()) - response = client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - { - 'op': 'update_leases', - 'leases': json.dumps(factory.make_random_leases()), - }) - self.assertEqual( - (httplib.OK, "Leases updated."), - (response.status_code, response.content)) - self.assertEqual([], tasks.add_new_dhcp_host_map.calls) - - def test_worker_calls_update_leases(self): - # In bug 1041158, the worker's upload_leases task tried to call - # the update_leases API at the wrong URL path. It has the right - # path now. 
- self.useFixture( - EnvironmentVariableFixture("MAAS_URL", settings.DEFAULT_MAAS_URL)) - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) - refresh_worker(nodegroup) - self.patch(MAASClient, 'post', Mock()) - leases = factory.make_random_leases() - send_leases(leases) - nodegroup_path = reverse( - 'nodegroup_handler', args=[nodegroup.uuid]) - nodegroup_path = nodegroup_path.decode('ascii').lstrip('/') - MAASClient.post.assert_called_once_with( - nodegroup_path, 'update_leases', leases=json.dumps(leases)) - - def test_accept_accepts_nodegroup(self): - nodegroups = [factory.make_node_group() for i in range(3)] - uuids = [nodegroup.uuid for nodegroup in nodegroups] - self.become_admin() - response = self.client.post( - reverse('nodegroups_handler'), - { - 'op': 'accept', - 'uuid': uuids, - }) - self.assertEqual( - (httplib.OK, "Nodegroup(s) accepted."), - (response.status_code, response.content)) - self.assertThat( - [nodegroup.status for nodegroup in - reload_objects(NodeGroup, nodegroups)], - AllMatch(Equals(NODEGROUP_STATUS.ACCEPTED))) - - def test_accept_reserved_to_admin(self): - response = self.client.post( - reverse('nodegroups_handler'), - { - 'op': 'accept', - 'uuid': factory.getRandomString(), - }) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_reject_rejects_nodegroup(self): - nodegroups = [factory.make_node_group() for i in range(3)] - uuids = [nodegroup.uuid for nodegroup in nodegroups] - self.become_admin() - response = self.client.post( - reverse('nodegroups_handler'), - { - 'op': 'reject', - 'uuid': uuids, - }) - self.assertEqual( - (httplib.OK, "Nodegroup(s) rejected."), - (response.status_code, response.content)) - self.assertThat( - [nodegroup.status for nodegroup in - reload_objects(NodeGroup, nodegroups)], - AllMatch(Equals(NODEGROUP_STATUS.REJECTED))) - - def test_reject_reserved_to_admin(self): - response = self.client.post( - reverse('nodegroups_handler'), - { - 'op': 'reject', - 'uuid': 
factory.getRandomString(), - }) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_import_boot_images_calls_script_for_all_accepted_clusters(self): - recorder = self.patch(nodegroup_module, 'import_boot_images') - proxy = factory.make_name('proxy') - Config.objects.set_config('http_proxy', proxy) - accepted_nodegroups = [ - factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED), - factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED), - ] - factory.make_node_group(status=NODEGROUP_STATUS.REJECTED) - factory.make_node_group(status=NODEGROUP_STATUS.PENDING) - admin = factory.make_admin() - client = OAuthAuthenticatedClient(admin) - response = client.post( - reverse('nodegroups_handler'), {'op': 'import_boot_images'}) - self.assertEqual( - httplib.OK, response.status_code, - explain_unexpected_response(httplib.OK, response)) - queues = [ - kwargs['queue'] - for args, kwargs in recorder.apply_async.call_args_list] - self.assertItemsEqual( - [nodegroup.work_queue for nodegroup in accepted_nodegroups], - queues) - - def test_import_boot_images_denied_if_not_admin(self): - user = factory.make_user() - client = OAuthAuthenticatedClient(user) - response = client.post( - reverse('nodegroups_handler'), {'op': 'import_boot_images'}) - self.assertEqual( - httplib.FORBIDDEN, response.status_code, - explain_unexpected_response(httplib.FORBIDDEN, response)) - - def test_report_download_progress_accepts_new_download(self): - nodegroup = factory.make_node_group() - filename = factory.getRandomString() - client = make_worker_client(nodegroup) - - response = client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - { - 'op': 'report_download_progress', - 'filename': filename, - }) - self.assertEqual( - httplib.OK, response.status_code, - explain_unexpected_response(httplib.OK, response)) - - progress = DownloadProgress.objects.get(nodegroup=nodegroup) - self.assertEqual(nodegroup, progress.nodegroup) - self.assertEqual(filename, 
progress.filename) - self.assertIsNone(progress.size) - self.assertIsNone(progress.bytes_downloaded) - self.assertEqual('', progress.error) - - def test_report_download_progress_updates_ongoing_download(self): - progress = factory.make_download_progress_incomplete() - client = make_worker_client(progress.nodegroup) - new_bytes_downloaded = progress.bytes_downloaded + 1 - - response = client.post( - reverse('nodegroup_handler', args=[progress.nodegroup.uuid]), - { - 'op': 'report_download_progress', - 'filename': progress.filename, - 'bytes_downloaded': new_bytes_downloaded, - }) - self.assertEqual( - httplib.OK, response.status_code, - explain_unexpected_response(httplib.OK, response)) - - progress = reload_object(progress) - self.assertEqual(new_bytes_downloaded, progress.bytes_downloaded) - - def test_report_download_progress_rejects_invalid_data(self): - progress = factory.make_download_progress_incomplete() - client = make_worker_client(progress.nodegroup) - - response = client.post( - reverse('nodegroup_handler', args=[progress.nodegroup.uuid]), - { - 'op': 'report_download_progress', - 'filename': progress.filename, - 'bytes_downloaded': -1, - }) - self.assertEqual( - httplib.BAD_REQUEST, response.status_code, - explain_unexpected_response(httplib.BAD_REQUEST, response)) - - def test_probe_and_enlist_ucsm_adds_ucsm(self): - nodegroup = factory.make_node_group() - url = 'http://url' - username = factory.make_name('user') - password = factory.make_name('password') - self.become_admin() - - mock = self.patch(nodegroup_module, 'enlist_nodes_from_ucsm') - - response = self.client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - { - 'op': 'probe_and_enlist_ucsm', - 'url': url, - 'username': username, - 'password': password, - }) - - self.assertEqual( - httplib.OK, response.status_code, - explain_unexpected_response(httplib.OK, response)) - - args = (url, username, password) - matcher = MockCalledOnceWith(queue=nodegroup.uuid, args=args) - 
self.assertThat(mock.apply_async, matcher) - - def test_probe_and_enlist_mscm_adds_mscm(self): - nodegroup = factory.make_node_group() - host = 'http://host' - username = factory.make_name('user') - password = factory.make_name('password') - self.become_admin() - - mock = self.patch(nodegroup_module, 'enlist_nodes_from_mscm') - - response = self.client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - { - 'op': 'probe_and_enlist_mscm', - 'host': host, - 'username': username, - 'password': password, - }) - - self.assertEqual( - httplib.OK, response.status_code, - explain_unexpected_response(httplib.OK, response)) - - args = (host, username, password) - matcher = MockCalledOnceWith(queue=nodegroup.uuid, args=args) - self.assertThat(mock.apply_async, matcher) - - -class TestNodeGroupAPIAuth(MAASServerTestCase): - """Authorization tests for nodegroup API.""" - - example_lshw_details = dedent("""\ - - - """).encode("ascii") - - example_lshw_details_bin = bson.binary.Binary(example_lshw_details) - - def set_lshw_details(self, node, data): - NodeCommissionResult.objects.store_data( - node, commissioningscript.LSHW_OUTPUT_NAME, - script_result=0, data=Bin(data)) - - example_lldp_details = dedent("""\ - - %d - """).encode("ascii") - - example_lldp_details_bin = bson.binary.Binary(example_lldp_details) - - def set_lldp_details(self, node, data): - NodeCommissionResult.objects.store_data( - node, commissioningscript.LLDP_OUTPUT_NAME, - script_result=0, data=Bin(data)) - - def test_nodegroup_requires_authentication(self): - nodegroup = factory.make_node_group() - response = self.client.get( - reverse('nodegroup_handler', args=[nodegroup.uuid])) - self.assertEqual(httplib.UNAUTHORIZED, response.status_code) - - def test_update_leases_works_for_nodegroup_worker(self): - nodegroup = factory.make_node_group() - client = make_worker_client(nodegroup) - response = client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - {'op': 'update_leases', 'leases': 
json.dumps({})}) - self.assertEqual( - httplib.OK, response.status_code, - explain_unexpected_response(httplib.OK, response)) - - def test_update_leases_does_not_work_for_normal_user(self): - nodegroup = factory.make_node_group() - log_in_as_normal_user(self.client) - response = self.client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - {'op': 'update_leases', 'leases': json.dumps({})}) - self.assertEqual( - httplib.FORBIDDEN, response.status_code, - explain_unexpected_response(httplib.FORBIDDEN, response)) - - def test_update_leases_does_not_let_worker_update_other_nodegroup(self): - requesting_nodegroup = factory.make_node_group() - about_nodegroup = factory.make_node_group() - client = make_worker_client(requesting_nodegroup) - - response = client.post( - reverse('nodegroup_handler', args=[about_nodegroup.uuid]), - {'op': 'update_leases', 'leases': json.dumps({})}) - - self.assertEqual( - httplib.FORBIDDEN, response.status_code, - explain_unexpected_response(httplib.FORBIDDEN, response)) - - def test_nodegroup_list_nodes_requires_authentication(self): - nodegroup = factory.make_node_group() - response = self.client.get( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - {'op': 'list_nodes'}) - self.assertEqual(httplib.UNAUTHORIZED, response.status_code) - - def test_nodegroup_list_nodes_does_not_work_for_normal_user(self): - nodegroup = factory.make_node_group() - log_in_as_normal_user(self.client) - - response = self.client.get( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - {'op': 'list_nodes'}) - - self.assertEqual( - httplib.FORBIDDEN, response.status_code, - explain_unexpected_response(httplib.FORBIDDEN, response)) - - def test_nodegroup_list_nodes_works_for_nodegroup_worker(self): - nodegroup = factory.make_node_group() - node = factory.make_node(nodegroup=nodegroup) - client = make_worker_client(nodegroup) - - response = client.get( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - {'op': 'list_nodes'}) - - 
self.assertEqual( - httplib.OK, response.status_code, - explain_unexpected_response(httplib.OK, response)) - parsed_result = json.loads(response.content) - self.assertItemsEqual([node.system_id], parsed_result) - - def test_nodegroup_list_nodes_works_for_admin(self): - nodegroup = factory.make_node_group() - admin = factory.make_admin() - client = OAuthAuthenticatedClient(admin) - node = factory.make_node(nodegroup=nodegroup) - - response = client.get( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - {'op': 'list_nodes'}) - - self.assertEqual( - httplib.OK, response.status_code, - explain_unexpected_response(httplib.OK, response)) - parsed_result = json.loads(response.content) - self.assertItemsEqual([node.system_id], parsed_result) - - def test_nodegroup_import_boot_images_calls_script(self): - recorder = self.patch(tasks, 'call_and_check') - self.patch(nodegroup_module, 'report_boot_images') - proxy = factory.getRandomString() - Config.objects.set_config('http_proxy', proxy) - nodegroup = factory.make_node_group() - admin = factory.make_admin() - client = OAuthAuthenticatedClient(admin) - - response = client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - {'op': 'import_boot_images'}) - - self.assertEqual( - httplib.OK, response.status_code, - explain_unexpected_response(httplib.OK, response)) - recorder.assert_called_once_with( - ['sudo', '-n', '-E', 'maas-import-pxe-files'], env=ANY) - - def test_nodegroup_import_boot_images_denied_if_not_admin(self): - nodegroup = factory.make_node_group() - user = factory.make_user() - client = OAuthAuthenticatedClient(user) - - response = client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - {'op': 'import_boot_images'}) - - self.assertEqual( - httplib.FORBIDDEN, response.status_code, - explain_unexpected_response(httplib.FORBIDDEN, response)) - - def make_details_request(self, client, nodegroup): - system_ids = {node.system_id for node in nodegroup.node_set.all()} - return client.post( 
- reverse('nodegroup_handler', args=[nodegroup.uuid]), - {'op': 'details', 'system_ids': system_ids}) - - def test_details_requires_authentication(self): - nodegroup = factory.make_node_group() - response = self.make_details_request(self.client, nodegroup) - self.assertEqual(httplib.UNAUTHORIZED, response.status_code) - - def test_details_refuses_nonworker(self): - log_in_as_normal_user(self.client) - nodegroup = factory.make_node_group() - response = self.make_details_request(self.client, nodegroup) - self.assertEqual( - httplib.FORBIDDEN, response.status_code, - explain_unexpected_response(httplib.FORBIDDEN, response)) - - def test_details_returns_details(self): - nodegroup = factory.make_node_group() - node = factory.make_node(nodegroup=nodegroup) - self.set_lshw_details(node, self.example_lshw_details) - self.set_lldp_details(node, self.example_lldp_details) - client = make_worker_client(nodegroup) - - response = client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - {'op': 'details', 'system_ids': [node.system_id]}) - - self.assertEqual(httplib.OK, response.status_code) - parsed_result = bson.BSON(response.content).decode() - self.assertDictEqual( - { - node.system_id: { - "lshw": self.example_lshw_details_bin, - "lldp": self.example_lldp_details_bin, - }, - }, - parsed_result) - - def test_details_allows_admin(self): - nodegroup = factory.make_node_group() - node = factory.make_node(nodegroup=nodegroup) - user = factory.make_admin() - client = OAuthAuthenticatedClient(user) - - response = client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - {'op': 'details', 'system_ids': [node.system_id]}) - - self.assertEqual(httplib.OK, response.status_code) - parsed_result = bson.BSON(response.content).decode() - self.assertDictEqual( - { - node.system_id: { - "lshw": None, - "lldp": None, - }, - }, - parsed_result) - - def test_empty_details(self): - # Empty details are passed through. 
- nodegroup = factory.make_node_group() - node = factory.make_node(nodegroup=nodegroup) - self.set_lshw_details(node, b'') - self.set_lldp_details(node, b'') - client = make_worker_client(nodegroup) - - response = client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - {'op': 'details', 'system_ids': [node.system_id]}) - - self.assertEqual(httplib.OK, response.status_code) - parsed_result = bson.BSON(response.content).decode() - self.assertDictEqual( - { - node.system_id: { - "lshw": bson.binary.Binary(b""), - "lldp": bson.binary.Binary(b""), - }, - }, - parsed_result) - - def test_details_does_not_see_other_node_groups(self): - nodegroup_mine = factory.make_node_group() - nodegroup_theirs = factory.make_node_group() - node_mine = factory.make_node(nodegroup=nodegroup_mine) - self.set_lshw_details(node_mine, self.example_lshw_details) - node_theirs = factory.make_node(nodegroup=nodegroup_theirs) - self.set_lldp_details(node_theirs, self.example_lldp_details) - client = make_worker_client(nodegroup_mine) - - response = client.post( - reverse('nodegroup_handler', args=[nodegroup_mine.uuid]), - {'op': 'details', - 'system_ids': [node_mine.system_id, node_theirs.system_id]}) - - self.assertEqual(httplib.OK, response.status_code) - parsed_result = bson.BSON(response.content).decode() - self.assertDictEqual( - { - node_mine.system_id: { - "lshw": self.example_lshw_details_bin, - "lldp": None, - }, - }, - parsed_result) - - def test_details_with_no_details(self): - # If there are no nodes, an empty map is returned. 
- nodegroup = factory.make_node_group() - client = make_worker_client(nodegroup) - response = self.make_details_request(client, nodegroup) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = bson.BSON(response.content).decode() - self.assertDictEqual({}, parsed_result) - - def test_POST_report_download_progress_works_for_nodegroup_worker(self): - nodegroup = factory.make_node_group() - filename = factory.getRandomString() - client = make_worker_client(nodegroup) - - response = client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - { - 'op': 'report_download_progress', - 'filename': filename, - }) - - self.assertEqual( - httplib.OK, response.status_code, - explain_unexpected_response(httplib.OK, response)) - - def test_POST_report_download_progress_does_not_work_for_normal_user(self): - nodegroup = factory.make_node_group() - log_in_as_normal_user(self.client) - - response = self.client.post( - reverse('nodegroup_handler', args=[nodegroup.uuid]), - { - 'op': 'report_download_progress', - 'filename': factory.getRandomString(), - }) - - self.assertEqual( - httplib.FORBIDDEN, response.status_code, - explain_unexpected_response(httplib.FORBIDDEN, response)) - - def test_POST_report_download_progress_does_work_for_other_cluster(self): - filename = factory.getRandomString() - client = make_worker_client(factory.make_node_group()) - - response = client.post( - reverse( - 'nodegroup_handler', args=[factory.make_node_group().uuid]), - { - 'op': 'report_download_progress', - 'filename': filename, - }) - - self.assertEqual( - httplib.FORBIDDEN, response.status_code, - explain_unexpected_response(httplib.FORBIDDEN, response)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_node.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_node.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_node.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_node.py 1970-01-01 00:00:00.000000000 +0000 @@ 
-1,889 +0,0 @@ -# Copyright 2013-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for the Node API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from base64 import b64encode -from cStringIO import StringIO -import httplib -import json -import sys - -import bson -from django.core.urlresolvers import reverse -from maasserver.enum import ( - DISTRO_SERIES, - NODE_STATUS, - NODE_STATUS_CHOICES_DICT, - ) -from maasserver.fields import ( - MAC, - mac_error_msg, - ) -from maasserver.models import Node -from maasserver.testing import ( - reload_object, - reload_objects, - ) -from maasserver.testing.api import APITestCase -from maasserver.testing.architecture import make_usable_architecture -from maasserver.testing.factory import factory -from maasserver.testing.oauthclient import OAuthAuthenticatedClient -from maasserver.testing.testcase import MAASServerTestCase -from maasserver.utils import map_enum -from metadataserver.models import ( - commissioningscript, - NodeKey, - NodeUserData, - ) -from metadataserver.nodeinituser import get_node_init_user - - -class NodeAnonAPITest(MAASServerTestCase): - - def test_anon_nodes_GET(self): - # Anonymous requests to the API without a specified operation - # get a "Bad Request" response. - response = self.client.get(reverse('nodes_handler')) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - - def test_anon_api_doc(self): - # The documentation is accessible to anon users. - self.patch(sys, "stderr", StringIO()) - response = self.client.get(reverse('api-doc')) - self.assertEqual(httplib.OK, response.status_code) - # No error or warning are emitted by docutils. 
- self.assertEqual("", sys.stderr.getvalue()) - - def test_node_init_user_cannot_access(self): - token = NodeKey.objects.get_token_for_node(factory.make_node()) - client = OAuthAuthenticatedClient(get_node_init_user(), token) - response = client.get(reverse('nodes_handler'), {'op': 'list'}) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - -class NodesAPILoggedInTest(MAASServerTestCase): - - def test_nodes_GET_logged_in(self): - # A (Django) logged-in user can access the API. - self.client_log_in() - node = factory.make_node() - response = self.client.get(reverse('nodes_handler'), {'op': 'list'}) - parsed_result = json.loads(response.content) - - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual( - [node.system_id], - [parsed_node.get('system_id') for parsed_node in parsed_result]) - - -class TestNodeAPI(APITestCase): - """Tests for /api/1.0/nodes//.""" - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/nodes/node-name/', - reverse('node_handler', args=['node-name'])) - - def get_node_uri(self, node): - """Get the API URI for `node`.""" - return reverse('node_handler', args=[node.system_id]) - - def test_GET_returns_node(self): - # The api allows for fetching a single Node (using system_id). 
- node = factory.make_node() - response = self.client.get(self.get_node_uri(node)) - - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual(node.hostname, parsed_result['hostname']) - self.assertEqual(node.system_id, parsed_result['system_id']) - - def test_GET_returns_associated_tag(self): - node = factory.make_node() - tag = factory.make_tag() - node.tags.add(tag) - response = self.client.get(self.get_node_uri(node)) - - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual([tag.name], parsed_result['tag_names']) - - def test_GET_returns_associated_ip_addresses(self): - node = factory.make_node() - mac = factory.make_mac_address(node=node) - lease = factory.make_dhcp_lease( - nodegroup=node.nodegroup, mac=mac.mac_address) - response = self.client.get(self.get_node_uri(node)) - - self.assertEqual( - httplib.OK, response.status_code, response.content) - parsed_result = json.loads(response.content) - self.assertEqual([lease.ip], parsed_result['ip_addresses']) - - def test_GET_returns_associated_routers(self): - macs = [MAC('aa:bb:cc:dd:ee:ff'), MAC('00:11:22:33:44:55')] - node = factory.make_node(routers=macs) - response = self.client.get(self.get_node_uri(node)) - - self.assertEqual( - httplib.OK, response.status_code, response.content) - parsed_result = json.loads(response.content) - self.assertItemsEqual( - [mac.get_raw() for mac in macs], parsed_result['routers']) - - def test_GET_returns_zone(self): - node = factory.make_node() - response = self.client.get(self.get_node_uri(node)) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual( - [node.zone.name, node.zone.description], - [ - parsed_result['zone']['name'], - parsed_result['zone']['description']]) - - def test_GET_refuses_to_access_nonexistent_node(self): - # When fetching a Node, the api returns a 'Not Found' (404) 
error - # if no node is found. - url = reverse('node_handler', args=['invalid-uuid']) - - response = self.client.get(url) - - self.assertEqual(httplib.NOT_FOUND, response.status_code) - - def test_GET_returns_owner_name_when_allocated_to_self(self): - node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=self.logged_in_user) - response = self.client.get(self.get_node_uri(node)) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual(node.owner.username, parsed_result["owner"]) - - def test_GET_returns_owner_name_when_allocated_to_other_user(self): - node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) - response = self.client.get(self.get_node_uri(node)) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual(node.owner.username, parsed_result["owner"]) - - def test_GET_returns_empty_owner_when_not_allocated(self): - node = factory.make_node(status=NODE_STATUS.READY) - response = self.client.get(self.get_node_uri(node)) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual(None, parsed_result["owner"]) - - def test_POST_stop_checks_permission(self): - node = factory.make_node() - response = self.client.post(self.get_node_uri(node), {'op': 'stop'}) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_POST_stop_returns_node(self): - node = factory.make_node(owner=self.logged_in_user) - response = self.client.post(self.get_node_uri(node), {'op': 'stop'}) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual( - node.system_id, json.loads(response.content)['system_id']) - - def test_POST_stop_may_be_repeated(self): - node = factory.make_node( - owner=self.logged_in_user, mac=True, - power_type='ether_wake') - self.client.post(self.get_node_uri(node), {'op': 'stop'}) - response = 
self.client.post(self.get_node_uri(node), {'op': 'stop'}) - self.assertEqual(httplib.OK, response.status_code) - - def test_POST_start_checks_permission(self): - node = factory.make_node() - response = self.client.post(self.get_node_uri(node), {'op': 'start'}) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_POST_start_returns_node(self): - node = factory.make_node( - owner=self.logged_in_user, mac=True, - power_type='ether_wake') - response = self.client.post(self.get_node_uri(node), {'op': 'start'}) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual( - node.system_id, json.loads(response.content)['system_id']) - - def test_POST_start_sets_distro_series(self): - node = factory.make_node( - owner=self.logged_in_user, mac=True, - power_type='ether_wake') - distro_series = factory.getRandomEnum(DISTRO_SERIES) - response = self.client.post( - self.get_node_uri(node), - {'op': 'start', 'distro_series': distro_series}) - self.assertEqual( - (httplib.OK, node.system_id), - (response.status_code, json.loads(response.content)['system_id'])) - self.assertEqual( - distro_series, reload_object(node).distro_series) - - def test_POST_start_validates_distro_series(self): - node = factory.make_node( - owner=self.logged_in_user, mac=True, - power_type='ether_wake') - invalid_distro_series = factory.getRandomString() - response = self.client.post( - self.get_node_uri(node), - {'op': 'start', 'distro_series': invalid_distro_series}) - self.assertEqual( - ( - httplib.BAD_REQUEST, - {'distro_series': [ - "Value u'%s' is not a valid choice." 
% - invalid_distro_series]} - ), - (response.status_code, json.loads(response.content))) - - def test_POST_start_may_be_repeated(self): - node = factory.make_node( - owner=self.logged_in_user, mac=True, - power_type='ether_wake') - self.client.post(self.get_node_uri(node), {'op': 'start'}) - response = self.client.post(self.get_node_uri(node), {'op': 'start'}) - self.assertEqual(httplib.OK, response.status_code) - - def test_POST_start_stores_user_data(self): - node = factory.make_node( - owner=self.logged_in_user, mac=True, - power_type='ether_wake') - user_data = ( - b'\xff\x00\xff\xfe\xff\xff\xfe' + - factory.getRandomString().encode('ascii')) - response = self.client.post( - self.get_node_uri(node), { - 'op': 'start', - 'user_data': b64encode(user_data), - }) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(user_data, NodeUserData.objects.get_user_data(node)) - - def test_POST_release_releases_owned_node(self): - owned_statuses = [ - NODE_STATUS.RESERVED, - NODE_STATUS.ALLOCATED, - ] - owned_nodes = [ - factory.make_node(owner=self.logged_in_user, status=status) - for status in owned_statuses] - responses = [ - self.client.post(self.get_node_uri(node), {'op': 'release'}) - for node in owned_nodes] - self.assertEqual( - [httplib.OK] * len(owned_nodes), - [response.status_code for response in responses]) - self.assertItemsEqual( - [NODE_STATUS.READY] * len(owned_nodes), - [node.status for node in reload_objects(Node, owned_nodes)]) - - def test_POST_release_turns_on_netboot(self): - node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=self.logged_in_user) - node.set_netboot(on=False) - self.client.post(self.get_node_uri(node), {'op': 'release'}) - self.assertTrue(reload_object(node).netboot) - - def test_POST_release_resets_distro_series(self): - node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=self.logged_in_user, - distro_series=factory.getRandomEnum(DISTRO_SERIES)) - self.client.post(self.get_node_uri(node), 
{'op': 'release'}) - self.assertEqual('', reload_object(node).distro_series) - - def test_POST_release_resets_agent_name(self): - agent_name = factory.make_name('agent-name') - node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=self.logged_in_user, - distro_series=factory.getRandomEnum(DISTRO_SERIES), - agent_name=agent_name) - self.client.post(self.get_node_uri(node), {'op': 'release'}) - self.assertEqual('', reload_object(node).agent_name) - - def test_POST_release_removes_token_and_user(self): - node = factory.make_node(status=NODE_STATUS.READY) - self.client.post(reverse('nodes_handler'), {'op': 'acquire'}) - node = Node.objects.get(system_id=node.system_id) - self.assertEqual(NODE_STATUS.ALLOCATED, node.status) - self.assertEqual(self.logged_in_user, node.owner) - self.assertEqual(self.client.token.key, node.token.key) - self.client.post(self.get_node_uri(node), {'op': 'release'}) - node = Node.objects.get(system_id=node.system_id) - self.assertIs(None, node.owner) - self.assertIs(None, node.token) - - def test_POST_release_does_nothing_for_unowned_node(self): - node = factory.make_node( - status=NODE_STATUS.READY, owner=self.logged_in_user) - response = self.client.post( - self.get_node_uri(node), {'op': 'release'}) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(NODE_STATUS.READY, reload_object(node).status) - - def test_POST_release_forbidden_if_user_cannot_edit_node(self): - node = factory.make_node(status=NODE_STATUS.READY) - response = self.client.post( - self.get_node_uri(node), {'op': 'release'}) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_POST_release_fails_for_other_node_states(self): - releasable_statuses = [ - NODE_STATUS.RESERVED, - NODE_STATUS.ALLOCATED, - NODE_STATUS.READY, - ] - unreleasable_statuses = [ - status - for status in map_enum(NODE_STATUS).values() - if status not in releasable_statuses - ] - nodes = [ - factory.make_node(status=status, owner=self.logged_in_user) - for 
status in unreleasable_statuses] - responses = [ - self.client.post(self.get_node_uri(node), {'op': 'release'}) - for node in nodes] - self.assertEqual( - [httplib.CONFLICT] * len(unreleasable_statuses), - [response.status_code for response in responses]) - self.assertItemsEqual( - unreleasable_statuses, - [node.status for node in reload_objects(Node, nodes)]) - - def test_POST_release_in_wrong_state_reports_current_state(self): - node = factory.make_node( - status=NODE_STATUS.RETIRED, owner=self.logged_in_user) - response = self.client.post( - self.get_node_uri(node), {'op': 'release'}) - self.assertEqual( - ( - httplib.CONFLICT, - "Node cannot be released in its current state ('Retired').", - ), - (response.status_code, response.content)) - - def test_POST_release_rejects_request_from_unauthorized_user(self): - node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) - response = self.client.post( - self.get_node_uri(node), {'op': 'release'}) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - self.assertEqual(NODE_STATUS.ALLOCATED, reload_object(node).status) - - def test_POST_release_allows_admin_to_release_anyones_node(self): - node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) - self.become_admin() - response = self.client.post( - self.get_node_uri(node), {'op': 'release'}) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(NODE_STATUS.READY, reload_object(node).status) - - def test_POST_release_combines_with_acquire(self): - node = factory.make_node(status=NODE_STATUS.READY) - response = self.client.post( - reverse('nodes_handler'), {'op': 'acquire'}) - self.assertEqual(NODE_STATUS.ALLOCATED, reload_object(node).status) - node_uri = json.loads(response.content)['resource_uri'] - response = self.client.post(node_uri, {'op': 'release'}) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(NODE_STATUS.READY, reload_object(node).status) - - def 
test_POST_commission_commissions_node(self): - node = factory.make_node( - status=NODE_STATUS.READY, owner=factory.make_user()) - self.become_admin() - response = self.client.post( - self.get_node_uri(node), {'op': 'commission'}) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(NODE_STATUS.COMMISSIONING, reload_object(node).status) - - def test_PUT_updates_node(self): - # The api allows the updating of a Node. - node = factory.make_node( - hostname='diane', owner=self.logged_in_user, - architecture=make_usable_architecture(self)) - response = self.client_put( - self.get_node_uri(node), {'hostname': 'francis'}) - parsed_result = json.loads(response.content) - - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual('francis', parsed_result['hostname']) - self.assertEqual(0, Node.objects.filter(hostname='diane').count()) - self.assertEqual(1, Node.objects.filter(hostname='francis').count()) - - def test_PUT_omitted_hostname(self): - hostname = factory.make_name('hostname') - arch = make_usable_architecture(self) - node = factory.make_node( - hostname=hostname, owner=self.logged_in_user, architecture=arch) - response = self.client_put( - self.get_node_uri(node), - {'architecture': arch}) - self.assertEqual(httplib.OK, response.status_code, response.content) - self.assertTrue(Node.objects.filter(hostname=hostname).exists()) - - def test_PUT_ignores_unknown_fields(self): - node = factory.make_node( - owner=self.logged_in_user, - architecture=make_usable_architecture(self)) - field = factory.getRandomString() - response = self.client_put( - self.get_node_uri(node), - {field: factory.getRandomString()} - ) - - self.assertEqual(httplib.OK, response.status_code) - - def test_PUT_admin_can_change_power_type(self): - self.become_admin() - original_power_type = factory.getRandomPowerType() - new_power_type = factory.getRandomPowerType( - but_not=original_power_type) - node = factory.make_node( - owner=self.logged_in_user, - 
power_type=original_power_type, - architecture=make_usable_architecture(self)) - self.client_put( - self.get_node_uri(node), - {'power_type': new_power_type} - ) - - self.assertEqual( - new_power_type, reload_object(node).power_type) - - def test_PUT_non_admin_cannot_change_power_type(self): - original_power_type = factory.getRandomPowerType() - new_power_type = factory.getRandomPowerType( - but_not=original_power_type) - node = factory.make_node( - owner=self.logged_in_user, power_type=original_power_type) - self.client_put( - self.get_node_uri(node), - {'power_type': new_power_type} - ) - - self.assertEqual( - original_power_type, reload_object(node).power_type) - - def test_resource_uri_points_back_at_node(self): - # When a Node is returned by the API, the field 'resource_uri' - # provides the URI for this Node. - node = factory.make_node( - hostname='diane', owner=self.logged_in_user, - architecture=make_usable_architecture(self)) - response = self.client_put( - self.get_node_uri(node), {'hostname': 'francis'}) - parsed_result = json.loads(response.content) - - self.assertEqual( - reverse('node_handler', args=[parsed_result['system_id']]), - parsed_result['resource_uri']) - - def test_PUT_rejects_invalid_data(self): - # If the data provided to update a node is invalid, a 'Bad request' - # response is returned. - node = factory.make_node( - hostname='diane', owner=self.logged_in_user, - architecture=make_usable_architecture(self)) - response = self.client_put( - self.get_node_uri(node), {'hostname': 'too long' * 100}) - parsed_result = json.loads(response.content) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertEqual( - {'hostname': - ['Ensure this value has at most 255 characters ' - '(it has 800).']}, - parsed_result) - - def test_PUT_refuses_to_update_invisible_node(self): - # The request to update a single node is denied if the node isn't - # visible by the user. 
- other_node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) - - response = self.client_put(self.get_node_uri(other_node)) - - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_PUT_refuses_to_update_nonexistent_node(self): - # When updating a Node, the api returns a 'Not Found' (404) error - # if no node is found. - url = reverse('node_handler', args=['invalid-uuid']) - response = self.client_put(url) - - self.assertEqual(httplib.NOT_FOUND, response.status_code) - - def test_PUT_updates_power_parameters_field(self): - # The api allows the updating of a Node's power_parameters field. - self.become_admin() - node = factory.make_node( - owner=self.logged_in_user, - power_type='ether_wake', - architecture=make_usable_architecture(self)) - # Create a power_parameter valid for the selected power_type. - new_power_address = factory.getRandomMACAddress() - response = self.client_put( - self.get_node_uri(node), - {'power_parameters_mac_address': new_power_address}) - - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual( - {'mac_address': new_power_address}, - reload_object(node).power_parameters) - - def test_PUT_updates_cpu_memory_storage(self): - self.become_admin() - node = factory.make_node( - owner=self.logged_in_user, - power_type=factory.getRandomPowerType(), - architecture=make_usable_architecture(self)) - response = self.client_put( - self.get_node_uri(node), - {'cpu_count': 1, 'memory': 1024, 'storage': 2048}) - self.assertEqual(httplib.OK, response.status_code) - node = reload_object(node) - self.assertEqual(1, node.cpu_count) - self.assertEqual(1024, node.memory) - self.assertEqual(2048, node.storage) - - def test_PUT_updates_power_parameters_accepts_only_mac_for_wol(self): - self.become_admin() - node = factory.make_node( - owner=self.logged_in_user, - power_type='ether_wake', - architecture=make_usable_architecture(self)) - # Create an invalid power_parameter for WoL (not a valid - # MAC 
address). - new_power_address = factory.getRandomString() - response = self.client_put( - self.get_node_uri(node), - {'power_parameters_mac_address': new_power_address}) - - self.assertEqual( - ( - httplib.BAD_REQUEST, - {'power_parameters': ["MAC Address: %s" % mac_error_msg]}, - ), - (response.status_code, json.loads(response.content))) - - def test_PUT_updates_power_parameters_rejects_unknown_param(self): - self.become_admin() - power_parameters = factory.getRandomString() - node = factory.make_node( - owner=self.logged_in_user, - power_type='ether_wake', - power_parameters=power_parameters, - architecture=make_usable_architecture(self)) - response = self.client_put( - self.get_node_uri(node), - {'power_parameters_unknown_param': factory.getRandomString()}) - - self.assertEqual( - ( - httplib.BAD_REQUEST, - {'power_parameters': ["Unknown parameter(s): unknown_param."]} - ), - (response.status_code, json.loads(response.content))) - self.assertEqual( - power_parameters, reload_object(node).power_parameters) - - def test_PUT_updates_power_type_default_resets_params(self): - # If one sets power_type to empty, power_parameter gets - # reset by default (if skip_check is not set). - self.become_admin() - power_parameters = factory.getRandomString() - node = factory.make_node( - owner=self.logged_in_user, - power_type='ether_wake', - power_parameters=power_parameters, - architecture=make_usable_architecture(self)) - response = self.client_put( - self.get_node_uri(node), - {'power_type': ''}) - - node = reload_object(node) - self.assertEqual( - (httplib.OK, node.power_type, node.power_parameters), - (response.status_code, '', '')) - - def test_PUT_updates_power_type_empty_rejects_params(self): - # If one sets power_type to empty, one cannot set power_parameters. 
- self.become_admin() - power_parameters = factory.getRandomString() - node = factory.make_node( - owner=self.logged_in_user, - power_type='ether_wake', - power_parameters=power_parameters, - architecture=make_usable_architecture(self)) - new_param = factory.getRandomString() - response = self.client_put( - self.get_node_uri(node), - { - 'power_type': '', - 'power_parameters_address': new_param, - }) - - node = reload_object(node) - self.assertEqual( - ( - httplib.BAD_REQUEST, - {'power_parameters': ["Unknown parameter(s): address."]} - ), - (response.status_code, json.loads(response.content))) - self.assertEqual( - power_parameters, reload_object(node).power_parameters) - - def test_PUT_updates_power_type_empty_skip_check_to_force_params(self): - # If one sets power_type to empty, it is possible to pass - # power_parameter_skip_check='true' to force power_parameters. - # XXX bigjools 2014-01-21 Why is this necessary? - self.become_admin() - power_parameters = factory.getRandomString() - node = factory.make_node( - owner=self.logged_in_user, - power_type='ether_wake', - power_parameters=power_parameters, - architecture=make_usable_architecture(self)) - new_param = factory.getRandomString() - response = self.client_put( - self.get_node_uri(node), - { - 'power_type': '', - 'power_parameters_param': new_param, - 'power_parameters_skip_check': 'true', - }) - - node = reload_object(node) - self.assertEqual( - (httplib.OK, node.power_type, node.power_parameters), - (response.status_code, '', {'param': new_param})) - - def test_PUT_updates_power_parameters_skip_ckeck(self): - # With power_parameters_skip_check, arbitrary data - # can be put in a Node's power_parameter field. 
- self.become_admin() - node = factory.make_node( - owner=self.logged_in_user, - architecture=make_usable_architecture(self)) - new_param = factory.getRandomString() - new_value = factory.getRandomString() - response = self.client_put( - self.get_node_uri(node), - { - 'power_parameters_%s' % new_param: new_value, - 'power_parameters_skip_check': 'true', - }) - - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual( - {new_param: new_value}, reload_object(node).power_parameters) - - def test_PUT_updates_power_parameters_empty_string(self): - self.become_admin() - node = factory.make_node( - owner=self.logged_in_user, - power_type='ether_wake', - power_parameters=factory.getRandomString(), - architecture=make_usable_architecture(self)) - response = self.client_put( - self.get_node_uri(node), - {'power_parameters_mac_address': ''}) - - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual( - {'mac_address': ''}, - reload_object(node).power_parameters) - - def test_PUT_sets_zone(self): - self.become_admin() - new_zone = factory.make_zone() - node = factory.make_node(architecture=make_usable_architecture(self)) - - response = self.client_put( - self.get_node_uri(node), {'zone': new_zone.name}) - - self.assertEqual(httplib.OK, response.status_code) - node = reload_object(node) - self.assertEqual(new_zone, node.zone) - - def test_PUT_does_not_set_zone_if_not_present(self): - self.become_admin() - new_name = factory.make_name() - node = factory.make_node(architecture=make_usable_architecture(self)) - old_zone = node.zone - - response = self.client_put( - self.get_node_uri(node), {'hostname': new_name}) - - self.assertEqual(httplib.OK, response.status_code) - node = reload_object(node) - self.assertEqual((old_zone, new_name), (node.zone, node.hostname)) - - #@skip( - # "XXX: JeroenVermeulen 2013-12-11 bug=1259872: Clearing the zone " - # "field does not work..") - def test_PUT_clears_zone(self): - # The @skip above breaks some 150 tests, 
with a strange error. - # Figuring this out is taking too long; I'm disabling the test in a - # simpler way. - return - self.become_admin() - node = factory.make_node(zone=factory.make_zone()) - - response = self.client_put(self.get_node_uri(node), {'zone': ''}) - - self.assertEqual(httplib.OK, response.status_code) - node = reload_object(node) - self.assertEqual(None, node.zone) - - def test_PUT_without_zone_leaves_zone_unchanged(self): - self.become_admin() - zone = factory.make_zone() - node = factory.make_node( - zone=zone, architecture=make_usable_architecture(self)) - - response = self.client_put(self.get_node_uri(node), {}) - - self.assertEqual(httplib.OK, response.status_code) - node = reload_object(node) - self.assertEqual(zone, node.zone) - - def test_PUT_zone_change_requires_admin(self): - new_zone = factory.make_zone() - node = factory.make_node( - owner=self.logged_in_user, - architecture=make_usable_architecture(self)) - old_zone = node.zone - - response = self.client_put( - self.get_node_uri(node), - {'zone': new_zone.name}) - - # Awkwardly, the request succeeds because for non-admins, "zone" is - # an unknown parameter. Unknown parameters are ignored. - self.assertEqual(httplib.OK, response.status_code) - # The node's physical zone, however, has not been updated. - node = reload_object(node) - self.assertEqual(old_zone, node.zone) - - def test_DELETE_deletes_node(self): - # The api allows to delete a Node. - self.become_admin() - node = factory.make_node(owner=self.logged_in_user) - system_id = node.system_id - response = self.client.delete(self.get_node_uri(node)) - - self.assertEqual(204, response.status_code) - self.assertItemsEqual([], Node.objects.filter(system_id=system_id)) - - def test_DELETE_cannot_delete_allocated_node(self): - # The api allows to delete a Node. 
- self.become_admin() - node = factory.make_node(status=NODE_STATUS.ALLOCATED) - response = self.client.delete(self.get_node_uri(node)) - - self.assertEqual( - (httplib.CONFLICT, - "Cannot delete node %s: node is in state %s." % ( - node.system_id, - NODE_STATUS_CHOICES_DICT[NODE_STATUS.ALLOCATED])), - (response.status_code, response.content)) - - def test_DELETE_deletes_node_fails_if_not_admin(self): - # Only superusers can delete nodes. - node = factory.make_node(owner=self.logged_in_user) - response = self.client.delete(self.get_node_uri(node)) - - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_DELETE_forbidden_without_edit_permission(self): - # A user without the edit permission cannot delete a Node. - node = factory.make_node() - response = self.client.delete(self.get_node_uri(node)) - - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_DELETE_refuses_to_delete_invisible_node(self): - # The request to delete a single node is denied if the node isn't - # visible by the user. - other_node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) - - response = self.client.delete(self.get_node_uri(other_node)) - - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_DELETE_refuses_to_delete_nonexistent_node(self): - # When deleting a Node, the api returns a 'Not Found' (404) error - # if no node is found. 
- url = reverse('node_handler', args=['invalid-uuid']) - response = self.client.delete(url) - - self.assertEqual(httplib.NOT_FOUND, response.status_code) - - -class TestGetDetails(APITestCase): - """Tests for /api/1.0/nodes//?op=details.""" - - def make_lshw_result(self, node, script_result=0): - return factory.make_node_commission_result( - node=node, name=commissioningscript.LSHW_OUTPUT_NAME, - script_result=script_result) - - def make_lldp_result(self, node, script_result=0): - return factory.make_node_commission_result( - node=node, name=commissioningscript.LLDP_OUTPUT_NAME, - script_result=script_result) - - def get_details(self, node): - url = reverse('node_handler', args=[node.system_id]) - response = self.client.get(url, {'op': 'details'}) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual('application/bson', response['content-type']) - return bson.BSON(response.content).decode() - - def test_GET_returns_empty_details_when_there_are_none(self): - node = factory.make_node() - self.assertDictEqual( - {"lshw": None, "lldp": None}, - self.get_details(node)) - - def test_GET_returns_all_details(self): - node = factory.make_node() - lshw_result = self.make_lshw_result(node) - lldp_result = self.make_lldp_result(node) - self.assertDictEqual( - {"lshw": bson.Binary(lshw_result.data), - "lldp": bson.Binary(lldp_result.data)}, - self.get_details(node)) - - def test_GET_returns_only_those_details_that_exist(self): - node = factory.make_node() - lshw_result = self.make_lshw_result(node) - self.assertDictEqual( - {"lshw": bson.Binary(lshw_result.data), - "lldp": None}, - self.get_details(node)) - - def test_GET_returns_not_found_when_node_does_not_exist(self): - url = reverse('node_handler', args=['does-not-exist']) - response = self.client.get(url, {'op': 'details'}) - self.assertEqual(httplib.NOT_FOUND, response.status_code) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_nodes.py 
maas-1.7.6+bzr3376/src/maasserver/tests/test_api_nodes.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_nodes.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_nodes.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1163 +0,0 @@ -# Copyright 2013-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for the nodes API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -import json -import random - -from django.core.urlresolvers import reverse -from maasserver import forms -from maasserver.enum import ( - NODE_STATUS, - NODE_STATUS_CHOICES_DICT, - NODEGROUP_STATUS, - NODEGROUPINTERFACE_MANAGEMENT, - ) -from maasserver.exceptions import ClusterUnavailable -from maasserver.fields import MAC -from maasserver.models import Node -from maasserver.models.user import ( - create_auth_token, - get_auth_tokens, - ) -from maasserver.testing import reload_object -from maasserver.testing.api import ( - APITestCase, - MultipleUsersScenarios, - ) -from maasserver.testing.architecture import make_usable_architecture -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase -from maasserver.utils import ( - ignore_unused, - map_enum, - ) -from maasserver.utils.orm import get_one -from maastesting.djangotestcase import count_queries -from testtools.matchers import ( - Contains, - Equals, - MatchesListwise, - ) - - -class NodeHostnameTest(MultipleUsersScenarios, - MAASServerTestCase): - - scenarios = [ - ('user', dict(userfactory=factory.make_user)), - ('admin', dict(userfactory=factory.make_admin)), - ] - - def test_GET_list_returns_fqdn_with_domain_name_from_cluster(self): - # If DNS management is enabled, the domain part of a hostname - # is replaced by the domain name defined on the cluster. 
- hostname_without_domain = factory.make_name('hostname') - hostname_with_domain = '%s.%s' % ( - hostname_without_domain, factory.getRandomString()) - domain = factory.make_name('domain') - nodegroup = factory.make_node_group( - status=NODEGROUP_STATUS.ACCEPTED, - name=domain, - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) - factory.make_node( - hostname=hostname_with_domain, nodegroup=nodegroup) - expected_hostname = '%s.%s' % (hostname_without_domain, domain) - response = self.client.get(reverse('nodes_handler'), {'op': 'list'}) - self.assertEqual(httplib.OK, response.status_code, response.content) - parsed_result = json.loads(response.content) - self.assertItemsEqual( - [expected_hostname], - [node.get('hostname') for node in parsed_result]) - - -class AnonymousIsRegisteredAPITest(MAASServerTestCase): - - def test_is_registered_returns_True_if_node_registered(self): - mac_address = factory.getRandomMACAddress() - factory.make_mac_address(mac_address) - response = self.client.get( - reverse('nodes_handler'), - {'op': 'is_registered', 'mac_address': mac_address}) - self.assertEqual( - (httplib.OK, "true"), - (response.status_code, response.content)) - - def test_is_registered_returns_False_if_mac_registered_node_retired(self): - mac_address = factory.getRandomMACAddress() - mac = factory.make_mac_address(mac_address) - mac.node.status = NODE_STATUS.RETIRED - mac.node.save() - response = self.client.get( - reverse('nodes_handler'), - {'op': 'is_registered', 'mac_address': mac_address}) - self.assertEqual( - (httplib.OK, "false"), - (response.status_code, response.content)) - - def test_is_registered_normalizes_mac_address(self): - # These two non-normalized MAC addresses are the same. 
- non_normalized_mac_address = 'AA-bb-cc-dd-ee-ff' - non_normalized_mac_address2 = 'aabbccddeeff' - factory.make_mac_address(non_normalized_mac_address) - response = self.client.get( - reverse('nodes_handler'), - { - 'op': 'is_registered', - 'mac_address': non_normalized_mac_address2 - }) - self.assertEqual( - (httplib.OK, "true"), - (response.status_code, response.content)) - - def test_is_registered_returns_False_if_node_not_registered(self): - mac_address = factory.getRandomMACAddress() - response = self.client.get( - reverse('nodes_handler'), - {'op': 'is_registered', 'mac_address': mac_address}) - self.assertEqual( - (httplib.OK, "false"), - (response.status_code, response.content)) - - -def extract_system_ids(parsed_result): - """List the system_ids of the nodes in `parsed_result`.""" - return [node.get('system_id') for node in parsed_result] - - -class TestNodesAPI(APITestCase): - """Tests for /api/1.0/nodes/.""" - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/nodes/', reverse('nodes_handler')) - - def test_POST_new_creates_node(self): - # The API allows a non-admin logged-in user to create a Node. - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': factory.getRandomString(), - 'architecture': make_usable_architecture(self), - 'mac_addresses': ['aa:bb:cc:dd:ee:ff', '22:bb:cc:dd:ee:ff'], - }) - - self.assertEqual(httplib.OK, response.status_code) - - def test_POST_new_when_logged_in_creates_node_in_declared_state(self): - # When a user enlists a node, it goes into the Declared state. - # This will change once we start doing proper commissioning. 
- response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': factory.getRandomString(), - 'architecture': make_usable_architecture(self), - 'mac_addresses': ['aa:bb:cc:dd:ee:ff'], - }) - self.assertEqual(httplib.OK, response.status_code) - system_id = json.loads(response.content)['system_id'] - self.assertEqual( - NODE_STATUS.DECLARED, - Node.objects.get(system_id=system_id).status) - - def test_POST_new_when_no_RPC_to_cluster_defaults_empty_power(self): - # Test for bug 1305061, if there is no cluster RPC connection - # then make sure that power_type is defaulted to the empty - # string rather than being entirely absent, which results in a - # crash. - cluster_error = factory.make_name("cluster error") - self.patch(forms, 'get_power_types').side_effect = ( - ClusterUnavailable(cluster_error)) - self.become_admin() - # The patching behind the scenes to avoid *real* RPC is - # complex and the available power types is actually a - # valid set, so use an invalid type to trigger the bug here. - power_type = factory.make_name("power_type") - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'architecture': make_usable_architecture(self), - 'mac_addresses': ['aa:bb:cc:dd:ee:ff'], - 'power_type': power_type, - }) - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - validation_errors = json.loads(response.content)['power_type'] - self.assertIn(cluster_error, validation_errors[0]) - - def test_GET_list_lists_nodes(self): - # The api allows for fetching the list of Nodes. 
- node1 = factory.make_node() - node2 = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=self.logged_in_user) - response = self.client.get(reverse('nodes_handler'), {'op': 'list'}) - parsed_result = json.loads(response.content) - - self.assertEqual(httplib.OK, response.status_code) - self.assertItemsEqual( - [node1.system_id, node2.system_id], - extract_system_ids(parsed_result)) - - def create_nodes(self, nodegroup, nb): - [factory.make_node(nodegroup=nodegroup, mac=True) - for i in range(nb)] - - def test_GET_list_nodes_issues_constant_number_of_queries(self): - nodegroup = factory.make_node_group() - self.create_nodes(nodegroup, 10) - num_queries1, response1 = count_queries( - self.client.get, reverse('nodes_handler'), {'op': 'list'}) - self.create_nodes(nodegroup, 10) - num_queries2, response2 = count_queries( - self.client.get, reverse('nodes_handler'), {'op': 'list'}) - # Make sure the responses are ok as it's not useful to compare the - # number of queries if they are not. - self.assertEqual( - [httplib.OK, httplib.OK, 10, 20], - [ - response1.status_code, - response2.status_code, - len(extract_system_ids(json.loads(response1.content))), - len(extract_system_ids(json.loads(response2.content))), - ]) - self.assertEqual(num_queries1, num_queries2) - - def test_GET_list_without_nodes_returns_empty_list(self): - # If there are no nodes to list, the "list" op still works but - # returns an empty list. - response = self.client.get(reverse('nodes_handler'), {'op': 'list'}) - self.assertItemsEqual([], json.loads(response.content)) - - def test_GET_list_orders_by_id(self): - # Nodes are returned in id order. 
- nodes = [factory.make_node() for counter in range(3)] - response = self.client.get(reverse('nodes_handler'), {'op': 'list'}) - parsed_result = json.loads(response.content) - self.assertSequenceEqual( - [node.system_id for node in nodes], - extract_system_ids(parsed_result)) - - def test_GET_list_with_id_returns_matching_nodes(self): - # The "list" operation takes optional "id" parameters. Only - # nodes with matching ids will be returned. - ids = [factory.make_node().system_id for counter in range(3)] - matching_id = ids[0] - response = self.client.get(reverse('nodes_handler'), { - 'op': 'list', - 'id': [matching_id], - }) - parsed_result = json.loads(response.content) - self.assertItemsEqual( - [matching_id], extract_system_ids(parsed_result)) - - def test_GET_list_with_nonexistent_id_returns_empty_list(self): - # Trying to list a nonexistent node id returns a list containing - # no nodes -- even if other (non-matching) nodes exist. - existing_id = factory.make_node().system_id - nonexistent_id = existing_id + factory.getRandomString() - response = self.client.get(reverse('nodes_handler'), { - 'op': 'list', - 'id': [nonexistent_id], - }) - self.assertItemsEqual([], json.loads(response.content)) - - def test_GET_list_with_ids_orders_by_id(self): - # Even when ids are passed to "list," nodes are returned in id - # order, not necessarily in the order of the id arguments. - ids = [factory.make_node().system_id for counter in range(3)] - response = self.client.get(reverse('nodes_handler'), { - 'op': 'list', - 'id': list(reversed(ids)), - }) - parsed_result = json.loads(response.content) - self.assertSequenceEqual(ids, extract_system_ids(parsed_result)) - - def test_GET_list_with_some_matching_ids_returns_matching_nodes(self): - # If some nodes match the requested ids and some don't, only the - # matching ones are returned. 
- existing_id = factory.make_node().system_id - nonexistent_id = existing_id + factory.getRandomString() - response = self.client.get(reverse('nodes_handler'), { - 'op': 'list', - 'id': [existing_id, nonexistent_id], - }) - parsed_result = json.loads(response.content) - self.assertItemsEqual( - [existing_id], extract_system_ids(parsed_result)) - - def test_GET_list_with_hostname_returns_matching_nodes(self): - # The list operation takes optional "hostname" parameters. Only nodes - # with matching hostnames will be returned. - nodes = [factory.make_node() for counter in range(3)] - matching_hostname = nodes[0].hostname - matching_system_id = nodes[0].system_id - response = self.client.get(reverse('nodes_handler'), { - 'op': 'list', - 'hostname': [matching_hostname], - }) - parsed_result = json.loads(response.content) - self.assertItemsEqual( - [matching_system_id], extract_system_ids(parsed_result)) - - def test_GET_list_with_macs_returns_matching_nodes(self): - # The "list" operation takes optional "mac_address" parameters. Only - # nodes with matching MAC addresses will be returned. - macs = [factory.make_mac_address() for counter in range(3)] - matching_mac = macs[0].mac_address - matching_system_id = macs[0].node.system_id - response = self.client.get(reverse('nodes_handler'), { - 'op': 'list', - 'mac_address': [matching_mac], - }) - parsed_result = json.loads(response.content) - self.assertItemsEqual( - [matching_system_id], extract_system_ids(parsed_result)) - - def test_GET_list_with_invalid_macs_returns_sensible_error(self): - # If specifying an invalid MAC, make sure the error that's - # returned is not a crazy stack trace, but something nice to - # humans. - bad_mac1 = '00:E0:81:DD:D1:ZZ' # ZZ is bad. - bad_mac2 = '00:E0:81:DD:D1:XX' # XX is bad. 
- ok_mac = factory.make_mac_address() - response = self.client.get(reverse('nodes_handler'), { - 'op': 'list', - 'mac_address': [bad_mac1, bad_mac2, ok_mac], - }) - observed = response.status_code, response.content - expected = ( - Equals(httplib.BAD_REQUEST), - Contains( - "Invalid MAC address(es): 00:E0:81:DD:D1:ZZ, " - "00:E0:81:DD:D1:XX"), - ) - self.assertThat(observed, MatchesListwise(expected)) - - def test_GET_list_with_agent_name_filters_by_agent_name(self): - non_listed_node = factory.make_node( - agent_name=factory.make_name('agent_name')) - ignore_unused(non_listed_node) - agent_name = factory.make_name('agent-name') - node = factory.make_node(agent_name=agent_name) - response = self.client.get(reverse('nodes_handler'), { - 'op': 'list', - 'agent_name': agent_name, - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertSequenceEqual( - [node.system_id], extract_system_ids(parsed_result)) - - def test_GET_list_with_agent_name_filters_with_empty_string(self): - factory.make_node(agent_name=factory.make_name('agent-name')) - node = factory.make_node(agent_name='') - response = self.client.get(reverse('nodes_handler'), { - 'op': 'list', - 'agent_name': '', - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertSequenceEqual( - [node.system_id], extract_system_ids(parsed_result)) - - def test_GET_list_without_agent_name_does_not_filter(self): - nodes = [ - factory.make_node(agent_name=factory.make_name('agent-name')) - for i in range(3)] - response = self.client.get(reverse('nodes_handler'), {'op': 'list'}) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertSequenceEqual( - [node.system_id for node in nodes], - extract_system_ids(parsed_result)) - - def test_GET_list_with_zone_filters_by_zone(self): - non_listed_node = factory.make_node( - 
zone=factory.make_zone(name='twilight')) - ignore_unused(non_listed_node) - zone = factory.make_zone() - node = factory.make_node(zone=zone) - response = self.client.get(reverse('nodes_handler'), { - 'op': 'list', - 'zone': zone.name, - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertSequenceEqual( - [node.system_id], extract_system_ids(parsed_result)) - - def test_GET_list_without_zone_does_not_filter(self): - nodes = [ - factory.make_node(zone=factory.make_zone()) - for i in range(3)] - response = self.client.get(reverse('nodes_handler'), {'op': 'list'}) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertSequenceEqual( - [node.system_id for node in nodes], - extract_system_ids(parsed_result)) - - def test_GET_list_allocated_returns_only_allocated_with_user_token(self): - # If the user's allocated nodes have different session tokens, - # list_allocated should only return the nodes that have the - # current request's token on them. - node_1 = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=self.logged_in_user, - token=get_auth_tokens(self.logged_in_user)[0]) - second_token = create_auth_token(self.logged_in_user) - factory.make_node( - owner=self.logged_in_user, status=NODE_STATUS.ALLOCATED, - token=second_token) - - user_2 = factory.make_user() - create_auth_token(user_2) - factory.make_node( - owner=self.logged_in_user, status=NODE_STATUS.ALLOCATED, - token=second_token) - - # At this point we have two nodes owned by the same user but - # allocated with different tokens, and a third node allocated to - # someone else entirely. We expect list_allocated to - # return the node with the same token as the one used in - # self.client, which is the one we set on node_1 above. 
- - response = self.client.get(reverse('nodes_handler'), { - 'op': 'list_allocated'}) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertItemsEqual( - [node_1.system_id], extract_system_ids(parsed_result)) - - def test_GET_list_allocated_filters_by_id(self): - # list_allocated takes an optional list of 'id' parameters to - # filter returned results. - current_token = get_auth_tokens(self.logged_in_user)[0] - nodes = [] - for i in range(3): - nodes.append(factory.make_node( - status=NODE_STATUS.ALLOCATED, - owner=self.logged_in_user, token=current_token)) - - required_node_ids = [nodes[0].system_id, nodes[1].system_id] - response = self.client.get(reverse('nodes_handler'), { - 'op': 'list_allocated', - 'id': required_node_ids, - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertItemsEqual( - required_node_ids, extract_system_ids(parsed_result)) - - def test_POST_acquire_returns_available_node(self): - # The "acquire" operation returns an available node. - available_status = NODE_STATUS.READY - node = factory.make_node(status=available_status, owner=None) - response = self.client.post( - reverse('nodes_handler'), {'op': 'acquire'}) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual(node.system_id, parsed_result['system_id']) - - def test_POST_acquire_allocates_node(self): - # The "acquire" operation allocates the node it returns. 
- available_status = NODE_STATUS.READY - node = factory.make_node(status=available_status, owner=None) - self.client.post(reverse('nodes_handler'), {'op': 'acquire'}) - node = Node.objects.get(system_id=node.system_id) - self.assertEqual(self.logged_in_user, node.owner) - - def test_POST_acquire_sets_agent_name(self): - available_status = NODE_STATUS.READY - node = factory.make_node( - status=available_status, owner=None, - agent_name=factory.make_name('agent-name')) - agent_name = factory.make_name('agent-name') - self.client.post( - reverse('nodes_handler'), - {'op': 'acquire', 'agent_name': agent_name}) - node = Node.objects.get(system_id=node.system_id) - self.assertEqual(agent_name, node.agent_name) - - def test_POST_acquire_agent_name_defaults_to_empty_string(self): - available_status = NODE_STATUS.READY - agent_name = factory.make_name('agent-name') - node = factory.make_node( - status=available_status, owner=None, agent_name=agent_name) - self.client.post(reverse('nodes_handler'), {'op': 'acquire'}) - node = Node.objects.get(system_id=node.system_id) - self.assertEqual('', node.agent_name) - - def test_POST_acquire_fails_if_no_node_present(self): - # The "acquire" operation returns a Conflict error if no nodes - # are available. - response = self.client.post( - reverse('nodes_handler'), {'op': 'acquire'}) - # Fails with Conflict error: resource can't satisfy request. - self.assertEqual(httplib.CONFLICT, response.status_code) - - def test_POST_acquire_ignores_already_allocated_node(self): - factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) - response = self.client.post( - reverse('nodes_handler'), {'op': 'acquire'}) - self.assertEqual(httplib.CONFLICT, response.status_code) - - def test_POST_acquire_chooses_candidate_matching_constraint(self): - # If "acquire" is passed a constraint, it will go for a node - # matching that constraint even if there's tons of other nodes - # available. 
- # (Creating lots of nodes here to minimize the chances of this - # passing by accident). - available_nodes = [ - factory.make_node(status=NODE_STATUS.READY, owner=None) - for counter in range(20)] - desired_node = random.choice(available_nodes) - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'name': desired_node.hostname, - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual(desired_node.hostname, parsed_result['hostname']) - - def test_POST_acquire_would_rather_fail_than_disobey_constraint(self): - # If "acquire" is passed a constraint, it won't return a node - # that does not meet that constraint. Even if it means that it - # can't meet the request. - factory.make_node(status=NODE_STATUS.READY, owner=None) - desired_node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'name': desired_node.system_id, - }) - self.assertEqual(httplib.CONFLICT, response.status_code) - - def test_POST_acquire_ignores_unknown_constraint(self): - node = factory.make_node(status=NODE_STATUS.READY, owner=None) - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - factory.getRandomString(): factory.getRandomString(), - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual(node.system_id, parsed_result['system_id']) - - def test_POST_acquire_allocates_node_by_name(self): - # Positive test for name constraint. - # If a name constraint is given, "acquire" attempts to allocate - # a node of that name. 
- node = factory.make_node(status=NODE_STATUS.READY, owner=None) - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'name': node.hostname, - }) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual( - node.hostname, json.loads(response.content)['hostname']) - - def test_POST_acquire_treats_unknown_name_as_resource_conflict(self): - # A name constraint naming an unknown node produces a resource - # conflict: most likely the node existed but has changed or - # disappeared. - # Certainly it's not a 404, since the resource named in the URL - # is "nodes/," which does exist. - factory.make_node(status=NODE_STATUS.READY, owner=None) - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'name': factory.getRandomString(), - }) - self.assertEqual(httplib.CONFLICT, response.status_code) - - def test_POST_acquire_allocates_node_by_arch(self): - # Asking for a particular arch acquires a node with that arch. - arch = make_usable_architecture(self) - node = factory.make_node(status=NODE_STATUS.READY, architecture=arch) - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'arch': arch, - }) - self.assertEqual(httplib.OK, response.status_code) - response_json = json.loads(response.content) - self.assertEqual(node.architecture, response_json['architecture']) - - def test_POST_acquire_treats_unknown_arch_as_bad_request(self): - # Asking for an unknown arch returns an HTTP "400 Bad Request" - factory.make_node(status=NODE_STATUS.READY) - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'arch': 'sparc', - }) - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - - def test_POST_acquire_allocates_node_by_cpu(self): - # Asking for enough cpu acquires a node with at least that. 
- node = factory.make_node(status=NODE_STATUS.READY, cpu_count=3) - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'cpu_count': 2, - }) - self.assertResponseCode(httplib.OK, response) - response_json = json.loads(response.content) - self.assertEqual(node.system_id, response_json['system_id']) - - def test_POST_acquire_allocates_node_by_float_cpu(self): - # Asking for a needlessly precise number of cpus works. - node = factory.make_node(status=NODE_STATUS.READY, cpu_count=1) - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'cpu_count': '1.0', - }) - self.assertResponseCode(httplib.OK, response) - response_json = json.loads(response.content) - self.assertEqual(node.system_id, response_json['system_id']) - - def test_POST_acquire_fails_with_invalid_cpu(self): - # Asking for an invalid amount of cpu returns a bad request. - factory.make_node(status=NODE_STATUS.READY) - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'cpu_count': 'plenty', - }) - self.assertResponseCode(httplib.BAD_REQUEST, response) - - def test_POST_acquire_allocates_node_by_mem(self): - # Asking for enough memory acquires a node with at least that. - node = factory.make_node(status=NODE_STATUS.READY, memory=1024) - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'mem': 1024, - }) - self.assertResponseCode(httplib.OK, response) - response_json = json.loads(response.content) - self.assertEqual(node.system_id, response_json['system_id']) - - def test_POST_acquire_fails_with_invalid_mem(self): - # Asking for an invalid amount of memory returns a bad request. 
- factory.make_node(status=NODE_STATUS.READY) - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'mem': 'bags', - }) - self.assertResponseCode(httplib.BAD_REQUEST, response) - - def test_POST_acquire_allocates_node_by_tags(self): - node = factory.make_node(status=NODE_STATUS.READY) - node_tag_names = ["fast", "stable", "cute"] - node.tags = [factory.make_tag(t) for t in node_tag_names] - # Legacy call using comma-separated tags. - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'tags': ['fast', 'stable'], - }) - self.assertResponseCode(httplib.OK, response) - response_json = json.loads(response.content) - self.assertItemsEqual(node_tag_names, response_json['tag_names']) - - def test_POST_acquire_allocates_node_by_zone(self): - factory.make_node(status=NODE_STATUS.READY) - zone = factory.make_zone() - node = factory.make_node(status=NODE_STATUS.READY, zone=zone) - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'zone': zone.name, - }) - self.assertResponseCode(httplib.OK, response) - response_json = json.loads(response.content) - self.assertEqual(node.system_id, response_json['system_id']) - - def test_POST_acquire_allocates_node_by_zone_fails_if_no_node(self): - factory.make_node(status=NODE_STATUS.READY) - zone = factory.make_zone() - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'zone': zone.name, - }) - self.assertResponseCode(httplib.CONFLICT, response) - - def test_POST_acquire_rejects_unknown_zone(self): - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'zone': factory.make_name('zone'), - }) - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - - def test_POST_acquire_allocates_node_by_tags_comma_separated(self): - node = factory.make_node(status=NODE_STATUS.READY) - node_tag_names = ["fast", "stable", "cute"] - node.tags = [factory.make_tag(t) for t in node_tag_names] - # Legacy call using 
comma-separated tags. - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'tags': 'fast, stable', - }) - self.assertResponseCode(httplib.OK, response) - response_json = json.loads(response.content) - self.assertItemsEqual(node_tag_names, response_json['tag_names']) - - def test_POST_acquire_allocates_node_by_tags_space_separated(self): - node = factory.make_node(status=NODE_STATUS.READY) - node_tag_names = ["fast", "stable", "cute"] - node.tags = [factory.make_tag(t) for t in node_tag_names] - # Legacy call using space-separated tags. - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'tags': 'fast stable', - }) - self.assertResponseCode(httplib.OK, response) - response_json = json.loads(response.content) - self.assertItemsEqual(node_tag_names, response_json['tag_names']) - - def test_POST_acquire_allocates_node_by_tags_comma_space_separated(self): - node = factory.make_node(status=NODE_STATUS.READY) - node_tag_names = ["fast", "stable", "cute"] - node.tags = [factory.make_tag(t) for t in node_tag_names] - # Legacy call using comma-and-space-separated tags. - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'tags': 'fast, stable cute', - }) - self.assertResponseCode(httplib.OK, response) - response_json = json.loads(response.content) - self.assertItemsEqual(node_tag_names, response_json['tag_names']) - - def test_POST_acquire_allocates_node_by_tags_mixed_input(self): - node = factory.make_node(status=NODE_STATUS.READY) - node_tag_names = ["fast", "stable", "cute"] - node.tags = [factory.make_tag(t) for t in node_tag_names] - # Mixed call using comma-separated tags in a list. 
- response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'tags': ['fast, stable', 'cute'], - }) - self.assertResponseCode(httplib.OK, response) - response_json = json.loads(response.content) - self.assertItemsEqual(node_tag_names, response_json['tag_names']) - - def test_POST_acquire_fails_without_all_tags(self): - # Asking for particular tags does not acquire if no node has all tags. - node1 = factory.make_node(status=NODE_STATUS.READY) - node1.tags = [factory.make_tag(t) for t in ("fast", "stable", "cute")] - node2 = factory.make_node(status=NODE_STATUS.READY) - node2.tags = [factory.make_tag("cheap")] - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'tags': 'fast, cheap', - }) - self.assertResponseCode(httplib.CONFLICT, response) - - def test_POST_acquire_fails_with_unknown_tags(self): - # Asking for a tag that does not exist gives a specific error. - node = factory.make_node(status=NODE_STATUS.READY) - node.tags = [factory.make_tag("fast")] - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'tags': 'fast, hairy, boo', - }) - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertEqual( - dict(tags=["No such tag(s): 'hairy', 'boo'."]), - json.loads(response.content)) - - def test_POST_acquire_allocates_node_connected_to_routers(self): - macs = [factory.make_MAC() for counter in range(3)] - node = factory.make_node(routers=macs, status=NODE_STATUS.READY) - factory.make_node(routers=[]) - - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'connected_to': [macs[2].get_raw(), macs[0].get_raw()], - }) - - self.assertResponseCode(httplib.OK, response) - response_json = json.loads(response.content) - self.assertEqual(node.system_id, response_json['system_id']) - - def test_POST_acquire_allocates_node_not_connected_to_routers(self): - macs = [MAC('aa:bb:cc:dd:ee:ff'), MAC('00:11:22:33:44:55')] - factory.make_node(routers=macs, 
status=NODE_STATUS.READY) - factory.make_node( - routers=[MAC('11:11:11:11:11:11')], status=NODE_STATUS.READY) - node = factory.make_node(status=NODE_STATUS.READY) - - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'not_connected_to': ['aa:bb:cc:dd:ee:ff', '11:11:11:11:11:11'], - }) - - self.assertResponseCode(httplib.OK, response) - response_json = json.loads(response.content) - self.assertEqual(node.system_id, response_json['system_id']) - - def test_POST_acquire_allocates_node_by_network(self): - networks = factory.make_networks(5) - macs = [ - factory.make_mac_address( - node=factory.make_node(status=NODE_STATUS.READY), - networks=[network]) - for network in networks - ] - # We'll make it so that only the node and network at this index will - # match the request. - pick = 2 - - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'networks': [networks[pick].name], - }) - - self.assertResponseCode(httplib.OK, response) - response_json = json.loads(response.content) - self.assertEqual(macs[pick].node.system_id, response_json['system_id']) - - def test_POST_acquire_allocates_node_by_not_network(self): - networks = factory.make_networks(5) - for network in networks: - node = factory.make_node(status=NODE_STATUS.READY) - factory.make_mac_address(node=node, networks=[network]) - right_node = factory.make_node(status=NODE_STATUS.READY) - factory.make_mac_address(node=node, networks=[factory.make_network()]) - - response = self.client.post(reverse('nodes_handler'), { - 'op': 'acquire', - 'not_networks': [network.name for network in networks], - }) - - self.assertResponseCode(httplib.OK, response) - response_json = json.loads(response.content) - self.assertEqual(right_node.system_id, response_json['system_id']) - - def test_POST_acquire_obeys_not_in_zone(self): - # Zone we don't want to acquire from. 
- not_in_zone = factory.make_zone() - nodes = [ - factory.make_node(status=NODE_STATUS.READY, zone=not_in_zone) - for _ in range(5) - ] - # Pick a node in the middle to avoid false negatives if acquire() - # always tries the oldest, or the newest, node first. - eligible_node = nodes[2] - eligible_node.zone = factory.make_zone() - eligible_node.save() - - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'acquire', - 'not_in_zone': [not_in_zone.name], - }) - self.assertEqual(httplib.OK, response.status_code) - - system_id = json.loads(response.content)['system_id'] - self.assertEqual(eligible_node.system_id, system_id) - - def test_POST_acquire_sets_a_token(self): - # "acquire" should set the Token being used in the request on - # the Node that is allocated. - available_status = NODE_STATUS.READY - node = factory.make_node(status=available_status, owner=None) - self.client.post(reverse('nodes_handler'), {'op': 'acquire'}) - node = Node.objects.get(system_id=node.system_id) - oauth_key = self.client.token.key - self.assertEqual(oauth_key, node.token.key) - - def test_POST_accept_gets_node_out_of_declared_state(self): - # This will change when we add provisioning. Until then, - # acceptance gets a node straight to Ready state. 
- self.become_admin() - target_state = NODE_STATUS.COMMISSIONING - - node = factory.make_node(status=NODE_STATUS.DECLARED) - response = self.client.post( - reverse('nodes_handler'), - {'op': 'accept', 'nodes': [node.system_id]}) - accepted_ids = [ - accepted_node['system_id'] - for accepted_node in json.loads(response.content)] - self.assertEqual( - (httplib.OK, [node.system_id]), - (response.status_code, accepted_ids)) - self.assertEqual(target_state, reload_object(node).status) - - def test_POST_quietly_accepts_empty_set(self): - response = self.client.post(reverse('nodes_handler'), {'op': 'accept'}) - self.assertEqual( - (httplib.OK, "[]"), (response.status_code, response.content)) - - def test_POST_accept_rejects_impossible_state_changes(self): - self.become_admin() - acceptable_states = set([ - NODE_STATUS.DECLARED, - NODE_STATUS.COMMISSIONING, - NODE_STATUS.READY, - ]) - unacceptable_states = ( - set(map_enum(NODE_STATUS).values()) - acceptable_states) - nodes = { - status: factory.make_node(status=status) - for status in unacceptable_states} - responses = { - status: self.client.post( - reverse('nodes_handler'), { - 'op': 'accept', - 'nodes': [node.system_id], - }) - for status, node in nodes.items()} - # All of these attempts are rejected with Conflict errors. - self.assertEqual( - {status: httplib.CONFLICT for status in unacceptable_states}, - { - status: responses[status].status_code - for status in unacceptable_states}) - - for status, response in responses.items(): - # Each error describes the problem. - self.assertIn("Cannot accept node enlistment", response.content) - # Each error names the node it encountered a problem with. - self.assertIn(nodes[status].system_id, response.content) - # Each error names the node state that the request conflicted - # with. 
- self.assertIn(NODE_STATUS_CHOICES_DICT[status], response.content) - - def test_POST_accept_fails_if_node_does_not_exist(self): - self.become_admin() - # Make sure there is a node, it just isn't the one being accepted - factory.make_node() - node_id = factory.getRandomString() - response = self.client.post( - reverse('nodes_handler'), {'op': 'accept', 'nodes': [node_id]}) - self.assertEqual( - (httplib.BAD_REQUEST, "Unknown node(s): %s." % node_id), - (response.status_code, response.content)) - - def test_POST_accept_accepts_multiple_nodes(self): - # This will change when we add provisioning. Until then, - # acceptance gets a node straight to Ready state. - self.become_admin() - target_state = NODE_STATUS.COMMISSIONING - - nodes = [ - factory.make_node(status=NODE_STATUS.DECLARED) - for counter in range(2)] - node_ids = [node.system_id for node in nodes] - response = self.client.post(reverse('nodes_handler'), { - 'op': 'accept', - 'nodes': node_ids, - }) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual( - [target_state] * len(nodes), - [reload_object(node).status for node in nodes]) - - def test_POST_accept_returns_actually_accepted_nodes(self): - self.become_admin() - acceptable_nodes = [ - factory.make_node(status=NODE_STATUS.DECLARED) - for counter in range(2) - ] - accepted_node = factory.make_node(status=NODE_STATUS.READY) - nodes = acceptable_nodes + [accepted_node] - response = self.client.post(reverse('nodes_handler'), { - 'op': 'accept', - 'nodes': [node.system_id for node in nodes], - }) - self.assertEqual(httplib.OK, response.status_code) - accepted_ids = [ - node['system_id'] for node in json.loads(response.content)] - self.assertItemsEqual( - [node.system_id for node in acceptable_nodes], accepted_ids) - self.assertNotIn(accepted_node.system_id, accepted_ids) - - def test_POST_quietly_releases_empty_set(self): - response = self.client.post( - reverse('nodes_handler'), {'op': 'release'}) - self.assertEqual( - (httplib.OK, "[]"), 
(response.status_code, response.content)) - - def test_POST_release_rejects_request_from_unauthorized_user(self): - node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) - response = self.client.post( - reverse('nodes_handler'), { - 'op': 'release', - 'nodes': [node.system_id], - }) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - self.assertEqual(NODE_STATUS.ALLOCATED, reload_object(node).status) - - def test_POST_release_fails_if_nodes_do_not_exist(self): - # Make sure there is a node, it just isn't among the ones to release - factory.make_node() - node_ids = {factory.getRandomString() for i in xrange(5)} - response = self.client.post( - reverse('nodes_handler'), { - 'op': 'release', - 'nodes': node_ids - }) - # Awkward parsing, but the order may vary and it's not JSON - s = response.content - returned_ids = s[s.find(':') + 2:s.rfind('.')].split(', ') - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertIn("Unknown node(s): ", response.content) - self.assertItemsEqual(node_ids, returned_ids) - - def test_POST_release_forbidden_if_user_cannot_edit_node(self): - # Create a bunch of nodes, owned by the logged in user - node_ids = { - factory.make_node( - status=NODE_STATUS.ALLOCATED, - owner=self.logged_in_user).system_id - for i in xrange(3) - } - # And one with no owner - another_node = factory.make_node(status=NODE_STATUS.RESERVED) - node_ids.add(another_node.system_id) - response = self.client.post( - reverse('nodes_handler'), { - 'op': 'release', - 'nodes': node_ids - }) - self.assertEqual( - (httplib.FORBIDDEN, - "You don't have the required permission to release the " - "following node(s): %s." 
% another_node.system_id), - (response.status_code, response.content)) - - def test_POST_release_rejects_impossible_state_changes(self): - acceptable_states = { - NODE_STATUS.ALLOCATED, - NODE_STATUS.RESERVED, - NODE_STATUS.READY, - } - unacceptable_states = ( - set(map_enum(NODE_STATUS).values()) - acceptable_states) - owner = self.logged_in_user - nodes = [ - factory.make_node(status=status, owner=owner) - for status in unacceptable_states] - response = self.client.post( - reverse('nodes_handler'), { - 'op': 'release', - 'nodes': [node.system_id for node in nodes], - }) - # Awkward parsing again, because a string is returned, not JSON - expected = [ - "%s ('%s')" % (node.system_id, node.display_status()) - for node in nodes - if node.status not in acceptable_states] - s = response.content - returned = s[s.rfind(':') + 2:s.rfind('.')].split(', ') - self.assertEqual(httplib.CONFLICT, response.status_code) - self.assertIn( - "Node(s) cannot be released in their current state:", - response.content) - self.assertItemsEqual(expected, returned) - - def test_POST_release_returns_modified_nodes(self): - owner = self.logged_in_user - acceptable_states = { - NODE_STATUS.READY, - NODE_STATUS.ALLOCATED, - NODE_STATUS.RESERVED, - } - nodes = [ - factory.make_node(status=status, owner=owner) - for status in acceptable_states - ] - response = self.client.post( - reverse('nodes_handler'), { - 'op': 'release', - 'nodes': [node.system_id for node in nodes], - }) - parsed_result = json.loads(response.content) - self.assertEqual(httplib.OK, response.status_code) - # The first node is READY, so shouldn't be touched - self.assertItemsEqual( - [nodes[1].system_id, nodes[2].system_id], - parsed_result) - - def test_handle_when_URL_is_repeated(self): - # bin/maas-enlist (in the maas-enlist package) has a bug where the - # path it uses is doubled up. This was not discovered previously - # because the API URL patterns were not anchored (see bug 1131323). 
- # For compatibility, MAAS will handle requests to obviously incorrect - # paths. It does *not* redirect because (a) it's not clear that curl - # (used by maas-enlist) supports HTTP 307 redirects, which are needed - # to support redirecting POSTs, and (b) curl does not follow redirects - # by default anyway. - architecture = make_usable_architecture(self) - response = self.client.post( - '/api/1.0/nodes/MAAS/api/1.0/nodes/', - { - 'op': 'new', - 'autodetect_nodegroup': '1', - 'hostname': factory.getRandomString(), - 'architecture': architecture, - 'mac_addresses': ['aa:bb:cc:dd:ee:ff'], - }) - self.assertEqual(httplib.OK, response.status_code) - system_id = json.loads(response.content)['system_id'] - nodes = Node.objects.filter(system_id=system_id) - self.assertIsNotNone(get_one(nodes)) - - def test_POST_set_zone_sets_zone_on_nodes(self): - self.become_admin() - node = factory.make_node() - zone = factory.make_zone() - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'set_zone', - 'nodes': [node.system_id], - 'zone': zone.name - }) - self.assertEqual(httplib.OK, response.status_code) - node = reload_object(node) - self.assertEqual(zone, node.zone) - - def test_POST_set_zone_does_not_affect_other_nodes(self): - self.become_admin() - node = factory.make_node() - original_zone = node.zone - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'set_zone', - 'nodes': [factory.make_node().system_id], - 'zone': factory.make_zone().name - }) - self.assertEqual(httplib.OK, response.status_code) - node = reload_object(node) - self.assertEqual(original_zone, node.zone) - - def test_POST_set_zone_requires_admin(self): - node = factory.make_node(owner=self.logged_in_user) - original_zone = node.zone - response = self.client.post( - reverse('nodes_handler'), - { - 'op': 'set_zone', - 'nodes': [node.system_id], - 'zone': factory.make_zone().name - }) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - node = reload_object(node) - 
self.assertEqual(original_zone, node.zone) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_pxeconfig.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_pxeconfig.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_pxeconfig.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_pxeconfig.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,354 +0,0 @@ -# Copyright 2013-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for PXE configuration retrieval from the API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -import json - -from django.core.urlresolvers import reverse -from django.test.client import RequestFactory -from maasserver import ( - api, - server_address, - ) -from maasserver.api import find_nodegroup_for_pxeconfig_request -from maasserver.enum import NODE_STATUS -from maasserver.models import ( - Config, - MACAddress, - ) -from maasserver.preseed import ( - compose_enlistment_preseed_url, - compose_preseed_url, - ) -from maasserver.testing.architecture import make_usable_architecture -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase -from maastesting.fakemethod import FakeMethod -from mock import Mock -from netaddr import IPNetwork -from provisioningserver import kernel_opts -from provisioningserver.kernel_opts import KernelParameters -from testtools.matchers import ( - Contains, - ContainsAll, - Equals, - MatchesListwise, - StartsWith, - ) - - -class TestPXEConfigAPI(MAASServerTestCase): - - def get_default_params(self): - return { - "local": factory.getRandomIPAddress(), - "remote": factory.getRandomIPAddress(), - } - - def get_mac_params(self): - params = self.get_default_params() - params['mac'] = factory.make_mac_address().mac_address - return 
params - - def get_pxeconfig(self, params=None): - """Make a request to `pxeconfig`, and return its response dict.""" - if params is None: - params = self.get_default_params() - response = self.client.get(reverse('pxeconfig'), params) - return json.loads(response.content) - - def test_pxeconfig_returns_json(self): - response = self.client.get( - reverse('pxeconfig'), self.get_default_params()) - self.assertThat( - ( - response.status_code, - response['Content-Type'], - response.content, - response.content, - ), - MatchesListwise( - ( - Equals(httplib.OK), - Equals("application/json"), - StartsWith(b'{'), - Contains('arch'), - )), - response) - - def test_pxeconfig_returns_all_kernel_parameters(self): - self.assertThat( - self.get_pxeconfig(), - ContainsAll(KernelParameters._fields)) - - def test_pxeconfig_returns_success_for_known_node(self): - params = self.get_mac_params() - response = self.client.get(reverse('pxeconfig'), params) - self.assertEqual(httplib.OK, response.status_code) - - def test_pxeconfig_returns_no_content_for_unknown_node(self): - params = dict(mac=factory.getRandomMACAddress(delimiter='-')) - response = self.client.get(reverse('pxeconfig'), params) - self.assertEqual(httplib.NO_CONTENT, response.status_code) - - def test_pxeconfig_returns_success_for_detailed_but_unknown_node(self): - architecture = make_usable_architecture(self) - arch, subarch = architecture.split('/') - params = dict( - self.get_default_params(), - mac=factory.getRandomMACAddress(delimiter='-'), - arch=arch, - subarch=subarch) - response = self.client.get(reverse('pxeconfig'), params) - self.assertEqual(httplib.OK, response.status_code) - - def test_pxeconfig_returns_global_kernel_params_for_enlisting_node(self): - # An 'enlisting' node means it looks like a node with details but we - # don't know about it yet. It should still receive the global - # kernel options. 
- value = factory.getRandomString() - Config.objects.set_config("kernel_opts", value) - architecture = make_usable_architecture(self) - arch, subarch = architecture.split('/') - params = dict( - self.get_default_params(), - mac=factory.getRandomMACAddress(delimiter='-'), - arch=arch, - subarch=subarch) - response = self.client.get(reverse('pxeconfig'), params) - response_dict = json.loads(response.content) - self.assertEqual(value, response_dict['extra_opts']) - - def test_pxeconfig_uses_present_boot_image(self): - release = Config.objects.get_config('commissioning_distro_series') - nodegroup = factory.make_node_group() - factory.make_boot_image( - architecture="amd64", release=release, nodegroup=nodegroup, - purpose="commissioning") - params = self.get_default_params() - params['cluster_uuid'] = nodegroup.uuid - params_out = self.get_pxeconfig(params) - self.assertEqual("amd64", params_out["arch"]) - - def test_pxeconfig_defaults_to_i386_for_default(self): - # As a lowest-common-denominator, i386 is chosen when the node is not - # yet known to MAAS. 
- expected_arch = tuple( - make_usable_architecture( - self, arch_name="i386", subarch_name="generic").split("/")) - params_out = self.get_pxeconfig() - observed_arch = params_out["arch"], params_out["subarch"] - self.assertEqual(expected_arch, observed_arch) - - def test_pxeconfig_uses_fixed_hostname_for_enlisting_node(self): - self.assertEqual('maas-enlist', self.get_pxeconfig().get('hostname')) - - def test_pxeconfig_uses_enlistment_domain_for_enlisting_node(self): - self.assertEqual( - Config.objects.get_config('enlistment_domain'), - self.get_pxeconfig().get('domain')) - - def test_pxeconfig_splits_domain_from_node_hostname(self): - host = factory.make_name('host') - domain = factory.make_name('domain') - full_hostname = '.'.join([host, domain]) - node = factory.make_node(hostname=full_hostname) - mac = factory.make_mac_address(node=node) - params = self.get_default_params() - params['mac'] = mac.mac_address - pxe_config = self.get_pxeconfig(params) - self.assertEqual(host, pxe_config.get('hostname')) - self.assertNotIn(domain, pxe_config.values()) - - def test_pxeconfig_uses_nodegroup_domain_for_node(self): - mac = factory.make_mac_address() - params = self.get_default_params() - params['mac'] = mac - self.assertEqual( - mac.node.nodegroup.name, - self.get_pxeconfig(params).get('domain')) - - def get_without_param(self, param): - """Request a `pxeconfig()` response, but omit `param` from request.""" - params = self.get_params() - del params[param] - return self.client.get(reverse('pxeconfig'), params) - - def silence_get_ephemeral_name(self): - # Silence `get_ephemeral_name` to avoid having to fetch the - # ephemeral name from the filesystem. 
- self.patch( - kernel_opts, 'get_ephemeral_name', - FakeMethod(result=factory.getRandomString())) - - def test_pxeconfig_has_enlistment_preseed_url_for_default(self): - self.silence_get_ephemeral_name() - params = self.get_default_params() - response = self.client.get(reverse('pxeconfig'), params) - self.assertEqual( - compose_enlistment_preseed_url(), - json.loads(response.content)["preseed_url"]) - - def test_pxeconfig_enlistment_preseed_url_detects_request_origin(self): - self.silence_get_ephemeral_name() - hostname = factory.make_hostname() - ng_url = 'http://%s' % hostname - network = IPNetwork("10.1.1/24") - ip = factory.getRandomIPInNetwork(network) - self.patch(server_address, 'gethostbyname', Mock(return_value=ip)) - factory.make_node_group(maas_url=ng_url, network=network) - params = self.get_default_params() - - # Simulate that the request originates from ip by setting - # 'REMOTE_ADDR'. - response = self.client.get( - reverse('pxeconfig'), params, REMOTE_ADDR=ip) - self.assertThat( - json.loads(response.content)["preseed_url"], - StartsWith(ng_url)) - - def test_pxeconfig_enlistment_log_host_url_detects_request_origin(self): - self.silence_get_ephemeral_name() - hostname = factory.make_hostname() - ng_url = 'http://%s' % hostname - network = IPNetwork("10.1.1/24") - ip = factory.getRandomIPInNetwork(network) - mock = self.patch( - server_address, 'gethostbyname', Mock(return_value=ip)) - factory.make_node_group(maas_url=ng_url, network=network) - params = self.get_default_params() - - # Simulate that the request originates from ip by setting - # 'REMOTE_ADDR'. 
- response = self.client.get( - reverse('pxeconfig'), params, REMOTE_ADDR=ip) - self.assertEqual( - (ip, hostname), - (json.loads(response.content)["log_host"], mock.call_args[0][0])) - - def test_pxeconfig_has_preseed_url_for_known_node(self): - params = self.get_mac_params() - node = MACAddress.objects.get(mac_address=params['mac']).node - response = self.client.get(reverse('pxeconfig'), params) - self.assertEqual( - compose_preseed_url(node), - json.loads(response.content)["preseed_url"]) - - def test_find_nodegroup_for_pxeconfig_request_uses_cluster_uuid(self): - # find_nodegroup_for_pxeconfig_request returns the nodegroup - # identified by the cluster_uuid parameter, if given. It - # completely ignores the other node or request details, as shown - # here by passing a uuid for a different cluster. - params = self.get_mac_params() - nodegroup = factory.make_node_group() - params['cluster_uuid'] = nodegroup.uuid - request = RequestFactory().get(reverse('pxeconfig'), params) - self.assertEqual( - nodegroup, - find_nodegroup_for_pxeconfig_request(request)) - - def test_preseed_url_for_known_node_uses_nodegroup_maas_url(self): - ng_url = 'http://%s' % factory.make_name('host') - network = IPNetwork("10.1.1/24") - ip = factory.getRandomIPInNetwork(network) - self.patch(server_address, 'gethostbyname', Mock(return_value=ip)) - nodegroup = factory.make_node_group(maas_url=ng_url, network=network) - params = self.get_mac_params() - node = MACAddress.objects.get(mac_address=params['mac']).node - node.nodegroup = nodegroup - node.save() - - # Simulate that the request originates from ip by setting - # 'REMOTE_ADDR'. - response = self.client.get( - reverse('pxeconfig'), params, REMOTE_ADDR=ip) - self.assertThat( - json.loads(response.content)["preseed_url"], - StartsWith(ng_url)) - - def test_get_boot_purpose_unknown_node(self): - # A node that's not yet known to MAAS is assumed to be enlisting, - # which uses a "commissioning" image. 
- self.assertEqual("commissioning", api.get_boot_purpose(None)) - - def test_get_boot_purpose_known_node(self): - # The following table shows the expected boot "purpose" for each set - # of node parameters. - options = [ - ("poweroff", {"status": NODE_STATUS.DECLARED}), - ("commissioning", {"status": NODE_STATUS.COMMISSIONING}), - ("poweroff", {"status": NODE_STATUS.FAILED_TESTS}), - ("poweroff", {"status": NODE_STATUS.MISSING}), - ("poweroff", {"status": NODE_STATUS.READY}), - ("poweroff", {"status": NODE_STATUS.RESERVED}), - ("install", {"status": NODE_STATUS.ALLOCATED, "netboot": True}), - ("xinstall", {"status": NODE_STATUS.ALLOCATED, "netboot": True}), - ("local", {"status": NODE_STATUS.ALLOCATED, "netboot": False}), - ("poweroff", {"status": NODE_STATUS.RETIRED}), - ] - node = factory.make_node() - for purpose, parameters in options: - if purpose == "xinstall": - node.use_fastpath_installer() - for name, value in parameters.items(): - setattr(node, name, value) - self.assertEqual(purpose, api.get_boot_purpose(node)) - - def test_pxeconfig_uses_boot_purpose(self): - fake_boot_purpose = factory.make_name("purpose") - self.patch(api, "get_boot_purpose", lambda node: fake_boot_purpose) - response = self.client.get(reverse('pxeconfig'), - self.get_default_params()) - self.assertEqual( - fake_boot_purpose, - json.loads(response.content)["purpose"]) - - def test_pxeconfig_returns_fs_host_as_cluster_controller(self): - # The kernel parameter `fs_host` points to the cluster controller - # address, which is passed over within the `local` parameter. 
- params = self.get_default_params() - kernel_params = KernelParameters(**self.get_pxeconfig(params)) - self.assertEqual(params["local"], kernel_params.fs_host) - - def test_pxeconfig_returns_extra_kernel_options(self): - node = factory.make_node() - extra_kernel_opts = factory.getRandomString() - Config.objects.set_config('kernel_opts', extra_kernel_opts) - mac = factory.make_mac_address(node=node) - params = self.get_default_params() - params['mac'] = mac.mac_address - pxe_config = self.get_pxeconfig(params) - self.assertEqual(extra_kernel_opts, pxe_config['extra_opts']) - - def test_pxeconfig_returns_None_for_extra_kernel_opts(self): - mac = factory.make_mac_address() - params = self.get_default_params() - params['mac'] = mac.mac_address - pxe_config = self.get_pxeconfig(params) - self.assertEqual(None, pxe_config['extra_opts']) - - def test_pxeconfig_sets_nonsense_label_for_insane_state(self): - # If pxeconfig() encounters a state where there is no relevant - # BootImage for a given set of (nodegroup, arch, subarch, - # release, purpose) it sets the label to no-such-image. This is - # clearly nonsensical, but this state only arises during tests - # or an insane environment. - mac = factory.make_mac_address() - params = self.get_default_params() - params['mac'] = mac.mac_address - params['arch'] = 'iHaveNoIdea' - pxe_config = self.get_pxeconfig(params) - self.assertEqual('no-such-image', pxe_config['label']) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,786 +0,0 @@ -# Copyright 2012-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Test maasserver API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from functools import partial -import httplib -from itertools import izip -import json - -from django.core.urlresolvers import reverse -from maasserver import api -from maasserver.api import ( - DISPLAYED_NODEGROUPINTERFACE_FIELDS, - store_node_power_parameters, - warn_if_missing_boot_images, - ) -from maasserver.enum import ( - COMPONENT, - NODEGROUP_STATUS, - NODEGROUPINTERFACE_MANAGEMENT, - ) -from maasserver.exceptions import MAASAPIBadRequest -from maasserver.forms_settings import INVALID_SETTING_MSG_TEMPLATE -from maasserver.models import ( - Config, - NodeGroup, - NodeGroupInterface, - SSHKey, - ) -from maasserver.models.user import get_auth_tokens -from maasserver.testing import ( - get_data, - reload_object, - ) -from maasserver.testing.api import ( - APITestCase, - log_in_as_normal_user, - make_worker_client, - ) -from maasserver.testing.factory import factory -from maasserver.testing.oauthclient import OAuthAuthenticatedClient -from maasserver.testing.testcase import MAASServerTestCase -from maasserver.tests.test_forms import make_interface_settings -from maasserver.utils import absolute_reverse -from maasserver.utils.orm import get_one -from maastesting.djangotestcase import TransactionTestCase -from maastesting.matchers import MockCalledOnceWith -from mock import Mock -from testtools.matchers import ( - Contains, - Equals, - MatchesListwise, - MatchesStructure, - ) - - -class TestAuthentication(MAASServerTestCase): - """Tests for `maasserver.api_auth`.""" - - def test_invalid_oauth_request(self): - # An OAuth-signed request that does not validate is an error. - user = factory.make_user() - client = OAuthAuthenticatedClient(user) - get_auth_tokens(user).delete() # Delete the user's API keys. 
- response = client.post(reverse('nodes_handler'), {'op': 'start'}) - observed = response.status_code, response.content - expected = ( - Equals(httplib.UNAUTHORIZED), - Contains("Invalid access token:"), - ) - self.assertThat(observed, MatchesListwise(expected)) - - -class TestXSSBugs(MAASServerTestCase): - """Tests for making sure we don't allow cross-site scripting bugs.""" - - def test_invalid_signature_response_is_textplain(self): - response = self.client.get( - reverse('nodes_handler'), - {'op': ''}) - self.assertIn("text/plain", response.get("Content-Type")) - self.assertNotIn("text/html", response.get("Content-Type")) - - -class TestStoreNodeParameters(MAASServerTestCase): - """Tests for `store_node_power_parameters`.""" - - def setUp(self): - super(TestStoreNodeParameters, self).setUp() - self.node = factory.make_node() - self.save = self.patch(self.node, "save") - self.request = Mock() - - def test_power_type_not_given(self): - # When power_type is not specified, nothing happens. - self.request.POST = {} - self.node.power_type = '' - store_node_power_parameters(self.node, self.request) - self.assertEqual('', self.node.power_type) - self.assertEqual('', self.node.power_parameters) - self.save.assert_has_calls([]) - - def test_power_type_set_but_no_parameters(self): - # When power_type is valid, it is set. However, if power_parameters is - # not specified, the node's power_parameters is left alone, and the - # node is saved. - power_type = factory.getRandomPowerType() - self.request.POST = {"power_type": power_type} - store_node_power_parameters(self.node, self.request) - self.assertEqual(power_type, self.node.power_type) - self.assertEqual("", self.node.power_parameters) - self.save.assert_called_once_with() - - def test_power_type_set_with_parameters(self): - # When power_type is valid, and power_parameters is valid JSON, both - # fields are set on the node, and the node is saved. 
- power_type = factory.getRandomPowerType() - power_parameters = {"foo": [1, 2, 3]} - self.request.POST = { - "power_type": power_type, - "power_parameters": json.dumps(power_parameters), - } - store_node_power_parameters(self.node, self.request) - self.assertEqual(power_type, self.node.power_type) - self.assertEqual(power_parameters, self.node.power_parameters) - self.save.assert_called_once_with() - - def test_power_type_set_with_invalid_parameters(self): - # When power_type is valid, but power_parameters is invalid JSON, the - # node is not saved, and an exception is raised. - power_type = factory.getRandomPowerType() - self.request.POST = { - "power_type": power_type, - "power_parameters": "Not JSON.", - } - self.assertRaises( - MAASAPIBadRequest, store_node_power_parameters, - self.node, self.request) - self.save.assert_has_calls([]) - - def test_invalid_power_type(self): - # When power_type is invalid, the node is not saved, and an exception - # is raised. - self.request.POST = {"power_type": factory.make_name("bogus")} - self.assertRaises( - MAASAPIBadRequest, store_node_power_parameters, - self.node, self.request) - self.save.assert_has_calls([]) - - def test_unknown_power_type(self): - # Sometimes a node doesn't know its power type, and will declare its - # powertype as ''; store_node_power_parameters will store that - # appropriately. - power_type = '' - self.request.POST = { - "power_type": '', - } - store_node_power_parameters(self.node, self.request) - self.assertEqual(power_type, self.node.power_type) - self.save.assert_called_once_with() - - -class AccountAPITest(APITestCase): - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/account/', reverse('account_handler')) - - def test_create_authorisation_token(self): - # The api operation create_authorisation_token returns a json dict - # with the consumer_key, the token_key and the token_secret in it. 
- response = self.client.post( - reverse('account_handler'), {'op': 'create_authorisation_token'}) - parsed_result = json.loads(response.content) - - self.assertEqual( - ['consumer_key', 'token_key', 'token_secret'], - sorted(parsed_result)) - self.assertIsInstance(parsed_result['consumer_key'], unicode) - self.assertIsInstance(parsed_result['token_key'], unicode) - self.assertIsInstance(parsed_result['token_secret'], unicode) - - def test_delete_authorisation_token_not_found(self): - # If the provided token_key does not exist (for the currently - # logged-in user), the api returns a 'Not Found' (404) error. - response = self.client.post( - reverse('account_handler'), - {'op': 'delete_authorisation_token', 'token_key': 'no-such-token'}) - - self.assertEqual(httplib.NOT_FOUND, response.status_code) - - def test_delete_authorisation_token_bad_request_no_token(self): - # token_key is a mandatory parameter when calling - # delete_authorisation_token. It it is not present in the request's - # parameters, the api returns a 'Bad Request' (400) error. 
- response = self.client.post( - reverse('account_handler'), {'op': 'delete_authorisation_token'}) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - - -class TestSSHKeyHandlers(APITestCase): - - def test_sshkeys_handler_path(self): - self.assertEqual( - '/api/1.0/account/prefs/sshkeys/', reverse('sshkeys_handler')) - - def test_sshkey_handler_path(self): - self.assertEqual( - '/api/1.0/account/prefs/sshkeys/key/', - reverse('sshkey_handler', args=['key'])) - - def test_list_works(self): - _, keys = factory.make_user_with_keys(user=self.logged_in_user) - params = dict(op="list") - response = self.client.get( - reverse('sshkeys_handler'), params) - self.assertEqual(httplib.OK, response.status_code, response) - parsed_result = json.loads(response.content) - expected_result = [ - dict( - id=keys[0].id, - key=keys[0].key, - resource_uri=reverse('sshkey_handler', args=[keys[0].id]), - ), - dict( - id=keys[1].id, - key=keys[1].key, - resource_uri=reverse('sshkey_handler', args=[keys[1].id]), - ), - ] - self.assertEqual(expected_result, parsed_result) - - def test_get_by_id_works(self): - _, keys = factory.make_user_with_keys( - n_keys=1, user=self.logged_in_user) - key = keys[0] - response = self.client.get( - reverse('sshkey_handler', args=[key.id])) - self.assertEqual(httplib.OK, response.status_code, response) - parsed_result = json.loads(response.content) - expected = dict( - id=key.id, - key=key.key, - resource_uri=reverse('sshkey_handler', args=[key.id]), - ) - self.assertEqual(expected, parsed_result) - - def test_delete_by_id_works(self): - _, keys = factory.make_user_with_keys( - n_keys=2, user=self.logged_in_user) - response = self.client.delete( - reverse('sshkey_handler', args=[keys[0].id])) - self.assertEqual(httplib.NO_CONTENT, response.status_code, response) - keys_after = SSHKey.objects.filter(user=self.logged_in_user) - self.assertEqual(1, len(keys_after)) - self.assertEqual(keys[1].id, keys_after[0].id) - - def 
test_delete_fails_if_not_your_key(self): - user, keys = factory.make_user_with_keys(n_keys=1) - response = self.client.delete( - reverse('sshkey_handler', args=[keys[0].id])) - self.assertEqual(httplib.FORBIDDEN, response.status_code, response) - self.assertEqual(1, len(SSHKey.objects.filter(user=user))) - - def test_adding_works(self): - key_string = get_data('data/test_rsa0.pub') - response = self.client.post( - reverse('sshkeys_handler'), - data=dict(op="new", key=key_string)) - self.assertEqual(httplib.CREATED, response.status_code) - parsed_response = json.loads(response.content) - self.assertEqual(key_string, parsed_response["key"]) - added_key = get_one(SSHKey.objects.filter(user=self.logged_in_user)) - self.assertEqual(key_string, added_key.key) - - def test_adding_catches_key_validation_errors(self): - key_string = factory.getRandomString() - response = self.client.post( - reverse('sshkeys_handler'), - data=dict(op='new', key=key_string)) - self.assertEqual(httplib.BAD_REQUEST, response.status_code, response) - self.assertIn("Invalid", response.content) - - def test_adding_returns_badrequest_when_key_not_in_form(self): - response = self.client.post( - reverse('sshkeys_handler'), - data=dict(op='new')) - self.assertEqual(httplib.BAD_REQUEST, response.status_code, response) - self.assertEqual( - dict(key=["This field is required."]), - json.loads(response.content)) - - -class MAASAPIAnonTest(MAASServerTestCase): - # The MAAS' handler is not accessible to anon users. 
- - def test_anon_get_config_forbidden(self): - response = self.client.get( - reverse('maas_handler'), - {'op': 'get_config'}) - - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_anon_set_config_forbidden(self): - response = self.client.post( - reverse('maas_handler'), - {'op': 'set_config'}) - - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - -class MAASAPITest(APITestCase): - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/maas/', reverse('maas_handler')) - - def test_simple_user_get_config_forbidden(self): - response = self.client.get( - reverse('maas_handler'), - {'op': 'get_config'}) - - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_simple_user_set_config_forbidden(self): - response = self.client.post( - reverse('maas_handler'), - {'op': 'set_config'}) - - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_get_config_requires_name_param(self): - self.become_admin() - response = self.client.get( - reverse('maas_handler'), - { - 'op': 'get_config', - }) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertEqual("No provided name!", response.content) - - def test_get_config_returns_config(self): - self.become_admin() - name = 'maas_name' - value = factory.getRandomString() - Config.objects.set_config(name, value) - response = self.client.get( - reverse('maas_handler'), - { - 'op': 'get_config', - 'name': name, - }) - - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertIn('application/json', response['Content-Type']) - self.assertEqual(value, parsed_result) - - def test_get_config_rejects_unknown_config_item(self): - self.become_admin() - name = factory.getRandomString() - value = factory.getRandomString() - Config.objects.set_config(name, value) - response = self.client.get( - reverse('maas_handler'), - { - 'op': 'get_config', - 'name': name, - }) - - self.assertEqual( - ( - 
httplib.BAD_REQUEST, - {name: [INVALID_SETTING_MSG_TEMPLATE % name]}, - ), - (response.status_code, json.loads(response.content))) - - def test_set_config_requires_name_param(self): - self.become_admin() - response = self.client.post( - reverse('maas_handler'), - { - 'op': 'set_config', - 'value': factory.getRandomString(), - }) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertEqual("No provided name!", response.content) - - def test_set_config_requires_string_name_param(self): - self.become_admin() - value = factory.getRandomString() - response = self.client.post( - reverse('maas_handler'), - { - 'op': 'set_config', - 'name': '', # Invalid empty name. - 'value': value, - }) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertEqual( - "Invalid name: Please enter a value", response.content) - - def test_set_config_requires_value_param(self): - self.become_admin() - response = self.client.post( - reverse('maas_handler'), - { - 'op': 'set_config', - 'name': factory.getRandomString(), - }) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertEqual("No provided value!", response.content) - - def test_admin_set_config(self): - self.become_admin() - name = 'maas_name' - value = factory.getRandomString() - response = self.client.post( - reverse('maas_handler'), - { - 'op': 'set_config', - 'name': name, - 'value': value, - }) - - self.assertEqual( - httplib.OK, response.status_code, response.content) - stored_value = Config.objects.get_config(name) - self.assertEqual(stored_value, value) - - def test_admin_set_config_rejects_unknown_config_item(self): - self.become_admin() - name = factory.getRandomString() - value = factory.getRandomString() - response = self.client.post( - reverse('maas_handler'), - { - 'op': 'set_config', - 'name': name, - 'value': value, - }) - - self.assertEqual( - ( - httplib.BAD_REQUEST, - {name: [INVALID_SETTING_MSG_TEMPLATE % name]}, - ), - (response.status_code, 
json.loads(response.content))) - - -class APIErrorsTest(TransactionTestCase): - - def test_internal_error_generates_proper_api_response(self): - error_message = factory.getRandomString() - - # Monkey patch api.create_node to have it raise a RuntimeError. - def raise_exception(*args, **kwargs): - raise RuntimeError(error_message) - self.patch(api, 'create_node', raise_exception) - response = self.client.post(reverse('nodes_handler'), {'op': 'new'}) - - self.assertEqual( - (httplib.INTERNAL_SERVER_ERROR, error_message), - (response.status_code, response.content)) - - -def dict_subset(obj, fields): - """Return a dict of a subset of the fields/values of an object.""" - undefined = object() - values = (getattr(obj, field, undefined) for field in fields) - return { - field: value for field, value in izip(fields, values) - if value is not undefined - } - - -class TestNodeGroupInterfacesAPI(APITestCase): - - def test_list_lists_interfaces(self): - self.become_admin() - nodegroup = factory.make_node_group() - response = self.client.get( - reverse('nodegroupinterfaces_handler', args=[nodegroup.uuid]), - {'op': 'list'}) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual( - [ - dict_subset( - interface, DISPLAYED_NODEGROUPINTERFACE_FIELDS) - for interface in nodegroup.nodegroupinterface_set.all() - ], - json.loads(response.content)) - - def test_list_does_not_work_for_normal_user(self): - nodegroup = NodeGroup.objects.ensure_master() - log_in_as_normal_user(self.client) - response = self.client.get( - reverse('nodegroupinterfaces_handler', args=[nodegroup.uuid]), - {'op': 'list'}) - self.assertEqual( - httplib.FORBIDDEN, response.status_code, response.content) - - def test_list_works_for_master_worker(self): - nodegroup = NodeGroup.objects.ensure_master() - client = make_worker_client(nodegroup) - response = client.get( - reverse('nodegroupinterfaces_handler', args=[nodegroup.uuid]), - {'op': 'list'}) - self.assertEqual(httplib.OK, response.status_code) - 
- def test_new_creates_interface(self): - self.become_admin() - nodegroup = factory.make_node_group( - management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) - - interface_settings = make_interface_settings() - query_data = dict(interface_settings, op="new") - response = self.client.post( - reverse('nodegroupinterfaces_handler', args=[nodegroup.uuid]), - query_data) - self.assertEqual(httplib.OK, response.status_code, response.content) - expected_result = interface_settings - new_interface = NodeGroupInterface.objects.get( - nodegroup=nodegroup, interface=interface_settings['interface']) - self.assertThat( - new_interface, - MatchesStructure.byEquality(**expected_result)) - - def test_new_validates_data(self): - self.become_admin() - nodegroup = factory.make_node_group() - response = self.client.post( - reverse('nodegroupinterfaces_handler', args=[nodegroup.uuid]), - {'op': 'new', 'ip': 'invalid ip'}) - self.assertEqual( - ( - httplib.BAD_REQUEST, - {'ip': ["Enter a valid IPv4 or IPv6 address."]}, - ), - (response.status_code, json.loads(response.content))) - - def test_new_does_not_work_for_normal_user(self): - nodegroup = NodeGroup.objects.ensure_master() - log_in_as_normal_user(self.client) - response = self.client.post( - reverse('nodegroupinterfaces_handler', args=[nodegroup.uuid]), - {'op': 'new'}) - self.assertEqual( - httplib.FORBIDDEN, response.status_code, response.content) - - def test_new_works_for_master_worker(self): - nodegroup = NodeGroup.objects.ensure_master() - client = make_worker_client(nodegroup) - response = client.post( - reverse('nodegroupinterfaces_handler', args=[nodegroup.uuid]), - {'op': 'new'}) - # It's a bad request because we've not entered all the required - # data but it's not FORBIDDEN which means we passed the test. 
- self.assertEqual( - ( - httplib.BAD_REQUEST, - {'ip': ["This field is required."]}, - ), - (response.status_code, json.loads(response.content))) - - -class TestNodeGroupInterfaceAPIAccessPermissions(APITestCase): - # The nodegroup worker must have access to report_foreign_dhcp. - # Normal users do not have access. - - def test_read_does_not_work_for_normal_user(self): - nodegroup = NodeGroup.objects.ensure_master() - interface = factory.make_node_group_interface( - nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) - log_in_as_normal_user(self.client) - response = self.client.get( - reverse( - 'nodegroupinterface_handler', - args=[nodegroup.uuid, interface.interface])) - self.assertEqual( - httplib.FORBIDDEN, response.status_code, response.content) - - def test_read_works_for_master_worker(self): - nodegroup = NodeGroup.objects.ensure_master() - interface = factory.make_node_group_interface( - nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) - client = make_worker_client(nodegroup) - response = client.get( - reverse( - 'nodegroupinterface_handler', - args=[nodegroup.uuid, interface.interface])) - self.assertEqual(httplib.OK, response.status_code) - - def test_update_does_not_work_for_normal_user(self): - nodegroup = NodeGroup.objects.ensure_master() - interface = factory.make_node_group_interface( - nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) - log_in_as_normal_user(self.client) - response = self.client_put( - reverse( - 'nodegroupinterface_handler', - args=[nodegroup.uuid, interface.interface]), - {'ip_range_high': factory.getRandomIPAddress()}) - self.assertEqual( - httplib.FORBIDDEN, response.status_code, response.content) - - def test_update_works_for_master_worker(self): - nodegroup = NodeGroup.objects.ensure_master() - interface = factory.make_node_group_interface( - nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) - self.client = make_worker_client(nodegroup) - get_ip_in_network = partial( - 
factory.getRandomIPInNetwork, interface.network) - new_ip_range_high = next( - ip for ip in iter(get_ip_in_network, None) - if ip != interface.ip_range_high) - response = self.client_put( - reverse( - 'nodegroupinterface_handler', - args=[nodegroup.uuid, interface.interface]), - {'ip_range_high': new_ip_range_high}) - self.assertEqual(httplib.OK, response.status_code) - - def test_delete_does_not_work_for_normal_user(self): - nodegroup = NodeGroup.objects.ensure_master() - interface = factory.make_node_group_interface( - nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) - log_in_as_normal_user(self.client) - response = self.client.delete( - reverse( - 'nodegroupinterface_handler', - args=[nodegroup.uuid, interface.interface])) - self.assertEqual( - httplib.FORBIDDEN, response.status_code, response.content) - - def test_delete_works_for_master_worker(self): - nodegroup = NodeGroup.objects.ensure_master() - interface = factory.make_node_group_interface( - nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) - self.client = make_worker_client(nodegroup) - response = self.client.delete( - reverse( - 'nodegroupinterface_handler', - args=[nodegroup.uuid, interface.interface])) - self.assertEqual(httplib.NO_CONTENT, response.status_code) - - -class TestNodeGroupInterfaceAPI(APITestCase): - - def test_read_interface(self): - self.become_admin() - nodegroup = factory.make_node_group() - [interface] = nodegroup.get_managed_interfaces() - response = self.client.get( - reverse( - 'nodegroupinterface_handler', - args=[nodegroup.uuid, interface.interface])) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual( - dict_subset( - interface, DISPLAYED_NODEGROUPINTERFACE_FIELDS), - json.loads(response.content)) - - def test_update_interface(self): - self.become_admin() - nodegroup = factory.make_node_group() - [interface] = nodegroup.get_managed_interfaces() - get_ip_in_network = partial( - factory.getRandomIPInNetwork, interface.network) - 
new_ip_range_high = next( - ip for ip in iter(get_ip_in_network, None) - if ip != interface.ip_range_high) - response = self.client_put( - reverse( - 'nodegroupinterface_handler', - args=[nodegroup.uuid, interface.interface]), - {'ip_range_high': new_ip_range_high}) - self.assertEqual( - (httplib.OK, new_ip_range_high), - (response.status_code, reload_object(interface).ip_range_high)) - - def test_delete_interface(self): - self.become_admin() - nodegroup = factory.make_node_group() - [interface] = nodegroup.get_managed_interfaces() - response = self.client.delete( - reverse( - 'nodegroupinterface_handler', - args=[nodegroup.uuid, interface.interface])) - self.assertEqual(httplib.NO_CONTENT, response.status_code) - self.assertFalse( - NodeGroupInterface.objects.filter( - interface=interface.interface, nodegroup=nodegroup).exists()) - - def test_report_foreign_dhcp_sets_value(self): - self.become_admin() - nodegroup = factory.make_node_group() - [interface] = nodegroup.get_managed_interfaces() - ip = factory.getRandomIPAddress() - response = self.client.post( - reverse( - 'nodegroupinterface_handler', - args=[nodegroup.uuid, interface.interface]), - { - 'op': 'report_foreign_dhcp', - 'foreign_dhcp_ip': ip, - }) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(ip, reload_object(interface).foreign_dhcp_ip) - - def test_report_foreign_dhcp_unsets_value(self): - self.become_admin() - nodegroup = factory.make_node_group() - [interface] = nodegroup.get_managed_interfaces() - interface.foreign_dhcp_ip = factory.getRandomIPAddress() - interface.save() - response = self.client.post( - reverse( - 'nodegroupinterface_handler', - args=[nodegroup.uuid, interface.interface]), - { - 'op': 'report_foreign_dhcp', - 'foreign_dhcp_ip': '', - }) - self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(None, reload_object(interface).foreign_dhcp_ip) - - -class TestWarnIfMissingBootImages(MAASServerTestCase): - """Test 
`warn_if_missing_boot_images`.""" - - def test_warns_if_no_images_found(self): - factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) - recorder = self.patch(api, 'register_persistent_error') - warn_if_missing_boot_images() - self.assertIn( - COMPONENT.IMPORT_PXE_FILES, - [args[0][0] for args in recorder.call_args_list]) - # The persistent error message links to the clusters listing. - self.assertIn( - absolute_reverse("cluster-list"), - recorder.call_args_list[0][0][1]) - - def test_warns_if_any_nodegroup_has_no_images(self): - factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) - recorder = self.patch(api, 'register_persistent_error') - warn_if_missing_boot_images() - self.assertIn( - COMPONENT.IMPORT_PXE_FILES, - [args[0][0] for args in recorder.call_args_list]) - - def test_ignores_non_accepted_groups(self): - factory.make_node_group(status=NODEGROUP_STATUS.PENDING) - factory.make_node_group(status=NODEGROUP_STATUS.REJECTED) - recorder = self.patch(api, 'register_persistent_error') - warn_if_missing_boot_images() - self.assertEqual([], recorder.mock_calls) - - def test_removes_warning_if_images_found(self): - self.patch(api, 'register_persistent_error') - self.patch(api, 'discard_persistent_error') - factory.make_boot_image( - nodegroup=factory.make_node_group( - status=NODEGROUP_STATUS.ACCEPTED)) - warn_if_missing_boot_images() - self.assertEqual([], api.register_persistent_error.mock_calls) - self.assertThat( - api.discard_persistent_error, - MockCalledOnceWith(COMPONENT.IMPORT_PXE_FILES)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_register.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_register.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_register.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_register.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,443 +0,0 @@ -# Copyright 2013-2014 Canonical Ltd. 
This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for the API's `register` method.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - - -import httplib -import json -from textwrap import dedent - -from celery.app import app_or_default -from django.conf import settings -from django.core.exceptions import ( - PermissionDenied, - ValidationError, - ) -from django.core.urlresolvers import reverse -from django.test.client import RequestFactory -from maasserver import api -from maasserver.enum import NODEGROUP_STATUS -from maasserver.forms import DEFAULT_DNS_ZONE_NAME -from maasserver.models import NodeGroup -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase -from maasserver.tests.test_forms import make_interface_settings -from maastesting.matchers import MockCalledOnceWith -from mock import ( - ANY, - Mock, - ) -from testtools.matchers import MatchesStructure -from testtools.testcase import ExpectedException - - -class TestUpdateNodeGroupMAASURL(MAASServerTestCase): - """Tests for `update_nodegroup_maas_url`.""" - - def make_request(self, host, script="/script", path="/script/path"): - """Fake a GET request.""" - request_factory = RequestFactory(SCRIPT_NAME=script) - return request_factory.get(path, SERVER_NAME=host) - - def test_update_from_request(self): - request = self.make_request( - "example.com", script="/script", path="/script/path") - nodegroup = factory.make_node_group() - - api.update_nodegroup_maas_url(nodegroup, request) - - self.assertEqual("http://example.com/script", nodegroup.maas_url) - - def test_update_from_request_discarded_if_localhost(self): - request = self.make_request("localhost") - maas_url = factory.make_name('maas_url') - nodegroup = factory.make_node_group(maas_url=maas_url) - - api.update_nodegroup_maas_url(nodegroup, 
request) - - # nodegroup.maas_url was not updated. - self.assertEqual(maas_url, nodegroup.maas_url) - - -def create_configured_master(): - """Set up a master, already configured.""" - master = NodeGroup.objects.ensure_master() - master.uuid = factory.getRandomUUID() - master.save() - - -def reset_master(): - """Reset to a situation where no master has been accepted.""" - master = NodeGroup.objects.ensure_master() - master.status = NODEGROUP_STATUS.PENDING - master.save() - - -def create_local_cluster_config(test_case, uuid): - """Set up a local cluster config with the given UUID. - - This patches settings.LOCAL_CLUSTER_CONFIG to point to a valid - cluster config file. - """ - contents = dedent(""" - MAAS_URL=http://localhost/MAAS - CLUSTER_UUID="%s" - """ % uuid) - file_name = test_case.make_file(contents=contents) - test_case.patch(settings, 'LOCAL_CLUSTER_CONFIG', file_name) - - -def patch_broker_url(test_case): - """Patch `BROKER_URL` with a fake. Returns the fake value.""" - fake = factory.make_name('fake_broker_url') - celery_conf = app_or_default().conf - test_case.patch(celery_conf, 'BROKER_URL', fake) - return fake - - -def make_register_request(uuid): - """Create a fake register() request.""" - request = RequestFactory().post( - reverse('nodegroups_handler'), - {'op': 'register', 'uuid': uuid}) - # Piston sets request.data like this. Our API code needs it. 
- request.data = request.POST - return request - - -class TestRegisterNodegroup(MAASServerTestCase): - """Tests for `register_nodegroup`.""" - - def test_creates_pending_nodegroup_by_default(self): - create_configured_master() - uuid = factory.getRandomUUID() - request = make_register_request(uuid) - - nodegroup = api.register_nodegroup(request, uuid) - - self.assertEqual(uuid, nodegroup.uuid) - self.assertEqual(NODEGROUP_STATUS.PENDING, nodegroup.status) - self.assertNotEqual(NodeGroup.objects.ensure_master().id, nodegroup.id) - - def test_registers_as_master_if_master_not_configured(self): - reset_master() - uuid = factory.getRandomUUID() - request = make_register_request(uuid) - - nodegroup = api.register_nodegroup(request, uuid) - - self.assertEqual(uuid, nodegroup.uuid) - self.assertEqual(NODEGROUP_STATUS.PENDING, nodegroup.status) - self.assertEqual(NodeGroup.objects.ensure_master().id, nodegroup.id) - - def test_updates_and_accepts_local_master_if_master_not_configured(self): - reset_master() - uuid = factory.getRandomUUID() - create_local_cluster_config(self, uuid) - request = make_register_request(uuid) - - nodegroup = api.register_nodegroup(request, uuid) - - self.assertEqual(uuid, nodegroup.uuid) - self.assertEqual(NODEGROUP_STATUS.ACCEPTED, nodegroup.status) - self.assertEqual(NodeGroup.objects.ensure_master().id, nodegroup.id) - - def test_keeps_local_cluster_controller_pending_if_master_configured(self): - create_configured_master() - uuid = factory.getRandomUUID() - create_local_cluster_config(self, uuid) - request = make_register_request(uuid) - - nodegroup = api.register_nodegroup(request, uuid) - - self.assertEqual(uuid, nodegroup.uuid) - self.assertEqual(NODEGROUP_STATUS.PENDING, nodegroup.status) - self.assertNotEqual(NodeGroup.objects.ensure_master().id, nodegroup.id) - - def test_rejects_duplicate_uuid(self): - nodegroup = factory.make_node_group() - request = make_register_request(nodegroup.uuid) - - self.assertRaises( - ValidationError, 
api.register_nodegroup, request, nodegroup.uuid) - - -class TestComposeNodegroupRegisterResponse(MAASServerTestCase): - """Tests for `compose_nodegroup_register_response`.""" - - def test_returns_credentials_if_accepted(self): - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) - existed = factory.getRandomBoolean() - self.assertEqual( - api.get_celery_credentials(), - api.compose_nodegroup_register_response(nodegroup, existed)) - - def test_credentials_contain_broker_url(self): - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) - broker_url = patch_broker_url(self) - existed = factory.getRandomBoolean() - - response = api.compose_nodegroup_register_response(nodegroup, existed) - - self.assertEqual({'BROKER_URL': broker_url}, response) - - def test_returns_forbidden_if_rejected(self): - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.REJECTED) - already_existed = factory.getRandomBoolean() - - with ExpectedException(PermissionDenied, "Rejected cluster."): - api.compose_nodegroup_register_response(nodegroup, already_existed) - - def test_returns_accepted_for_new_pending_nodegroup(self): - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.PENDING) - response = api.compose_nodegroup_register_response( - nodegroup, already_existed=False) - self.assertEqual( - (httplib.ACCEPTED, - "Cluster registered. Awaiting admin approval."), - (response.status_code, response.content)) - - def test_returns_accepted_for_existing_pending_nodegroup(self): - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.PENDING) - response = api.compose_nodegroup_register_response( - nodegroup, already_existed=True) - self.assertEqual( - (httplib.ACCEPTED, "Awaiting admin approval."), - (response.status_code, response.content)) - - -class TestRegisterAPI(MAASServerTestCase): - """Tests for the `register` method on the API. - - This method can be called anonymously. 
- """ - - def test_register_creates_nodegroup_and_interfaces(self): - create_configured_master() - name = factory.make_name('cluster') - uuid = factory.getRandomUUID() - interface = make_interface_settings() - response = self.client.post( - reverse('nodegroups_handler'), - { - 'op': 'register', - 'name': name, - 'uuid': uuid, - 'interfaces': json.dumps([interface]), - }) - nodegroup = NodeGroup.objects.get(uuid=uuid) - # The nodegroup was created with its interface. Its status is - # 'PENDING'. - self.assertEqual( - (name, NODEGROUP_STATUS.PENDING), - (nodegroup.name, nodegroup.status)) - self.assertThat( - nodegroup.nodegroupinterface_set.all()[0], - MatchesStructure.byEquality(**interface)) - # The response code is 'ACCEPTED': the nodegroup now needs to be - # validated by an admin. - self.assertEqual(httplib.ACCEPTED, response.status_code) - - def test_register_auto_accepts_local_master(self): - reset_master() - name = factory.make_name('cluster') - uuid = factory.getRandomUUID() - create_local_cluster_config(self, uuid) - patch_broker_url(self) - - response = self.client.post( - reverse('nodegroups_handler'), - { - 'op': 'register', - 'name': name, - 'uuid': uuid, - }) - self.assertEqual(httplib.OK, response.status_code, response) - - master = NodeGroup.objects.ensure_master() - # The cluster controller that made the request is registered as the - # master, since there was none. - self.assertEqual((uuid, name), (master.uuid, master.name)) - # It is also auto-accepted. 
- self.assertEqual(NODEGROUP_STATUS.ACCEPTED, master.status) - - def test_register_configures_master_if_unconfigured(self): - reset_master() - name = factory.make_name('cluster') - uuid = factory.getRandomUUID() - create_local_cluster_config(self, uuid) - interface = make_interface_settings() - - response = self.client.post( - reverse('nodegroups_handler'), - { - 'op': 'register', - 'name': name, - 'uuid': uuid, - 'interfaces': json.dumps([interface]), - }) - self.assertEqual(httplib.OK, response.status_code, response) - - master = NodeGroup.objects.ensure_master() - self.assertEqual(NODEGROUP_STATUS.ACCEPTED, master.status) - self.assertThat( - master.nodegroupinterface_set.get( - interface=interface['interface']), - MatchesStructure.byEquality(**interface)) - - def test_register_nodegroup_uses_default_zone_name(self): - uuid = factory.getRandomUUID() - create_local_cluster_config(self, uuid) - - response = self.client.post( - reverse('nodegroups_handler'), - { - 'op': 'register', - 'uuid': uuid, - }) - self.assertEqual(httplib.OK, response.status_code, response) - - master = NodeGroup.objects.ensure_master() - self.assertEqual( - (NODEGROUP_STATUS.ACCEPTED, DEFAULT_DNS_ZONE_NAME), - (master.status, master.name)) - - def test_register_nodegroup_validates_data(self): - create_configured_master() - response = self.client.post( - reverse('nodegroups_handler'), - { - 'op': 'register', - 'name': factory.make_name('cluster'), - 'uuid': factory.getRandomUUID(), - 'interfaces': 'invalid data', - }) - self.assertEqual( - ( - httplib.BAD_REQUEST, - {'interfaces': ['Invalid json value.']}, - ), - (response.status_code, json.loads(response.content))) - - def test_register_nodegroup_twice_does_not_update_nodegroup(self): - create_configured_master() - nodegroup = factory.make_node_group() - nodegroup.status = NODEGROUP_STATUS.PENDING - nodegroup.save() - name = factory.make_name('cluster') - uuid = nodegroup.uuid - response = self.client.post( - reverse('nodegroups_handler'), 
- { - 'op': 'register', - 'name': name, - 'uuid': uuid, - }) - new_nodegroup = NodeGroup.objects.get(uuid=uuid) - self.assertEqual( - (nodegroup.name, NODEGROUP_STATUS.PENDING), - (new_nodegroup.name, new_nodegroup.status)) - # The response code is 'ACCEPTED': the nodegroup still needs to be - # validated by an admin. - self.assertEqual(httplib.ACCEPTED, response.status_code) - - def test_register_returns_compose_nodegroup_register_response(self): - # register() returns whatever compose_nodegroup_register_response() - # tells it to return. - expected_response = factory.getRandomString() - self.patch( - api, 'compose_nodegroup_register_response', - Mock(return_value=expected_response)) - - response = self.client.post( - reverse('nodegroups_handler'), - { - 'op': 'register', - 'name': factory.make_name('cluster'), - 'uuid': factory.getRandomUUID(), - }) - - self.assertIn('application/json', response['Content-Type']) - self.assertEqual(expected_response, json.loads(response.content)) - - def test_register_new_nodegroup_does_not_record_maas_url(self): - # When registering a cluster, the URL with which the call was made - # (i.e. from the perspective of the cluster) is *not* recorded. - create_configured_master() - name = factory.make_name('cluster') - uuid = factory.getRandomUUID() - update_maas_url = self.patch(api, "update_nodegroup_maas_url") - response = self.client.post( - reverse('nodegroups_handler'), - {'op': 'register', 'name': name, 'uuid': uuid}) - self.assertEqual(httplib.ACCEPTED, response.status_code, response) - self.assertEqual([], update_maas_url.call_args_list) - - def test_register_accepted_nodegroup_updates_maas_url(self): - # When registering an existing, accepted, cluster, the MAAS URL we give - # it in the future is updated to the one on which the call was made. 
- create_configured_master() - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) - update_maas_url = self.patch(api, "update_nodegroup_maas_url") - response = self.client.post( - reverse('nodegroups_handler'), - {'op': 'register', 'uuid': nodegroup.uuid}) - self.assertEqual(httplib.OK, response.status_code, response) - self.assertThat(update_maas_url, MockCalledOnceWith(nodegroup, ANY)) - - def test_register_pending_nodegroup_does_not_update_maas_url(self): - # When registering an existing, pending cluster, the MAAS URL we give - # it in the future is *not* updated to the one on which the call was - # made. - create_configured_master() - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.PENDING) - update_maas_url = self.patch(api, "update_nodegroup_maas_url") - response = self.client.post( - reverse('nodegroups_handler'), - {'op': 'register', 'uuid': nodegroup.uuid}) - self.assertEqual(httplib.ACCEPTED, response.status_code, response) - self.assertEqual([], update_maas_url.call_args_list) - - def test_register_rejected_nodegroup_does_not_update_maas_url(self): - # When registering an existing, rejected cluster, the MAAS URL we give - # it in the future is *not* updated to the one on which the call was - # made. - create_configured_master() - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.REJECTED) - update_maas_url = self.patch(api, "update_nodegroup_maas_url") - response = self.client.post( - reverse('nodegroups_handler'), - {'op': 'register', 'uuid': nodegroup.uuid}) - self.assertEqual(httplib.FORBIDDEN, response.status_code, response) - self.assertEqual([], update_maas_url.call_args_list) - - def test_register_master_nodegroup_does_not_update_maas_url(self): - # When registering the master cluster, the MAAS URL we give it in - # the future is *not* updated to the one on which the call was made. 
- reset_master() - name = factory.make_name('cluster') - uuid = factory.getRandomUUID() - create_local_cluster_config(self, uuid) - update_maas_url = self.patch(api, "update_nodegroup_maas_url") - response = self.client.post( - reverse('nodegroups_handler'), - {'op': 'register', 'name': name, 'uuid': uuid}) - self.assertEqual(httplib.OK, response.status_code, response) - # This really did configure the master. - master = NodeGroup.objects.ensure_master() - self.assertEqual(uuid, master.uuid) - self.assertEqual([], update_maas_url.call_args_list) - # The master's maas_url field remains empty. - self.assertEqual("", master.maas_url) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_support.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_support.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_support.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_support.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,89 +0,0 @@ -# Copyright 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for API helpers.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - - -from collections import namedtuple -import httplib - -from django.core.exceptions import PermissionDenied -from django.core.urlresolvers import reverse -from maasserver.api_support import admin_method -from maasserver.models.config import ( - Config, - ConfigManager, - ) -from maasserver.testing.api import APITestCase -from maasserver.testing.factory import factory -from mock import ( - call, - Mock, - ) - - -class TestOperationsResource(APITestCase): - - def test_type_error_is_not_hidden(self): - # This tests that bug #1228205 is fixed (i.e. that a - # TypeError is properly reported and not swallowed by - # piston). - - # Create a valid configuration item. 
- name = 'maas_name' - value = factory.getRandomString() - Config.objects.set_config(name, value) - # Patch ConfigManager.get_config so that it will raise a - # TypeError exception. - self.patch(ConfigManager, "get_config", Mock(side_effect=TypeError)) - self.become_admin() - response = self.client.get( - reverse('maas_handler'), - { - 'op': 'get_config', - 'name': name, - }) - self.assertEqual( - httplib.INTERNAL_SERVER_ERROR, response.status_code, - response.content) - - -class TestAdminMethodDecorator(APITestCase): - - def test_non_admin_are_rejected(self): - FakeRequest = namedtuple('FakeRequest', ['user']) - request = FakeRequest(user=factory.make_user()) - mock = Mock() - - @admin_method - def api_method(self, request): - return mock() - - self.assertRaises(PermissionDenied, api_method, 'self', request) - self.assertEqual([], mock.mock_calls) - - def test_admin_can_call_method(self): - FakeRequest = namedtuple('FakeRequest', ['user']) - request = FakeRequest(user=factory.make_admin()) - return_value = factory.make_name('return') - mock = Mock(return_value=return_value) - - @admin_method - def api_method(self, request): - return mock() - - response = api_method('self', request) - self.assertEqual( - (return_value, [call()]), - (response, mock.mock_calls)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_tag.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_tag.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_tag.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_tag.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,507 +0,0 @@ -# Copyright 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for the Tags API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -import json - -from django.core.urlresolvers import reverse -from maasserver.enum import NODE_STATUS -from maasserver.models import Tag -from maasserver.models.node import generate_node_system_id -from maasserver.testing import reload_object -from maasserver.testing.api import ( - APITestCase, - make_worker_client, - ) -from maasserver.testing.factory import factory -from maasserver.testing.oauthclient import OAuthAuthenticatedClient -from metadataserver.models.commissioningscript import inject_lshw_result - - -class TestTagAPI(APITestCase): - """Tests for /api/1.0/tags//.""" - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/tags/tag-name/', - reverse('tag_handler', args=['tag-name'])) - - def get_tag_uri(self, tag): - """Get the API URI for `tag`.""" - return reverse('tag_handler', args=[tag.name]) - - def test_DELETE_requires_admin(self): - tag = factory.make_tag() - response = self.client.delete(self.get_tag_uri(tag)) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - self.assertItemsEqual([tag], Tag.objects.filter(id=tag.id)) - - def test_DELETE_removes_tag(self): - self.become_admin() - tag = factory.make_tag() - response = self.client.delete(self.get_tag_uri(tag)) - self.assertEqual(httplib.NO_CONTENT, response.status_code) - self.assertFalse(Tag.objects.filter(id=tag.id).exists()) - - def test_DELETE_404(self): - self.become_admin() - url = reverse('tag_handler', args=['no-tag']) - response = self.client.delete(url) - self.assertEqual(httplib.NOT_FOUND, response.status_code) - - def test_GET_returns_tag(self): - # The api allows for fetching a single Node (using system_id). 
- tag = factory.make_tag('tag-name') - url = reverse('tag_handler', args=['tag-name']) - response = self.client.get(url) - - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual(tag.name, parsed_result['name']) - self.assertEqual(tag.definition, parsed_result['definition']) - self.assertEqual(tag.comment, parsed_result['comment']) - - def test_GET_refuses_to_access_nonexistent_node(self): - # When fetching a Tag, the api returns a 'Not Found' (404) error - # if no tag is found. - url = reverse('tag_handler', args=['no-such-tag']) - response = self.client.get(url) - self.assertEqual(httplib.NOT_FOUND, response.status_code) - - def test_PUT_refuses_non_superuser(self): - tag = factory.make_tag() - response = self.client_put( - self.get_tag_uri(tag), {'comment': 'A special comment'}) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_PUT_updates_tag(self): - self.become_admin() - tag = factory.make_tag() - # Note that 'definition' is not being sent - response = self.client_put( - self.get_tag_uri(tag), - {'name': 'new-tag-name', 'comment': 'A random comment'}) - - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual('new-tag-name', parsed_result['name']) - self.assertEqual('A random comment', parsed_result['comment']) - self.assertEqual(tag.definition, parsed_result['definition']) - self.assertFalse(Tag.objects.filter(name=tag.name).exists()) - self.assertTrue(Tag.objects.filter(name='new-tag-name').exists()) - - def test_PUT_updates_node_associations(self): - node1 = factory.make_node() - inject_lshw_result(node1, b'') - node2 = factory.make_node() - inject_lshw_result(node2, b'') - tag = factory.make_tag(definition='//node/foo') - self.assertItemsEqual([tag.name], node1.tag_names()) - self.assertItemsEqual([], node2.tag_names()) - self.become_admin() - response = self.client_put( - self.get_tag_uri(tag), - 
{'definition': '//node/bar'}) - self.assertEqual(httplib.OK, response.status_code) - self.assertItemsEqual([], node1.tag_names()) - self.assertItemsEqual([tag.name], node2.tag_names()) - - def test_GET_nodes_with_no_nodes(self): - tag = factory.make_tag() - response = self.client.get(self.get_tag_uri(tag), {'op': 'nodes'}) - - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual([], parsed_result) - - def test_GET_nodes_returns_nodes(self): - tag = factory.make_tag() - node1 = factory.make_node() - # Create a second node that isn't tagged. - factory.make_node() - node1.tags.add(tag) - response = self.client.get(self.get_tag_uri(tag), {'op': 'nodes'}) - - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual([node1.system_id], - [r['system_id'] for r in parsed_result]) - - def test_GET_nodes_hides_invisible_nodes(self): - user2 = factory.make_user() - node1 = factory.make_node() - inject_lshw_result(node1, b'') - node2 = factory.make_node(status=NODE_STATUS.ALLOCATED, owner=user2) - inject_lshw_result(node2, b'') - tag = factory.make_tag(definition='//node') - response = self.client.get(self.get_tag_uri(tag), {'op': 'nodes'}) - - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual([node1.system_id], - [r['system_id'] for r in parsed_result]) - # However, for the other user, they should see the result - client2 = OAuthAuthenticatedClient(user2) - response = client2.get(self.get_tag_uri(tag), {'op': 'nodes'}) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertItemsEqual([node1.system_id, node2.system_id], - [r['system_id'] for r in parsed_result]) - - def test_PUT_invalid_definition(self): - self.become_admin() - node = factory.make_node() - inject_lshw_result(node, b'') - tag = factory.make_tag(definition='//child') - 
self.assertItemsEqual([tag.name], node.tag_names()) - response = self.client_put( - self.get_tag_uri(tag), {'definition': 'invalid::tag'}) - - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - # The tag should not be modified - tag = reload_object(tag) - self.assertItemsEqual([tag.name], node.tag_names()) - self.assertEqual('//child', tag.definition) - - def test_POST_update_nodes_unknown_tag(self): - self.become_admin() - name = factory.make_name() - response = self.client.post( - reverse('tag_handler', args=[name]), - {'op': 'update_nodes'}) - self.assertEqual(httplib.NOT_FOUND, response.status_code) - - def test_POST_update_nodes_changes_associations(self): - tag = factory.make_tag() - self.become_admin() - node_first = factory.make_node() - node_second = factory.make_node() - node_first.tags.add(tag) - self.assertItemsEqual([node_first], tag.node_set.all()) - response = self.client.post( - self.get_tag_uri(tag), { - 'op': 'update_nodes', - 'add': [node_second.system_id], - 'remove': [node_first.system_id], - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertItemsEqual([node_second], tag.node_set.all()) - self.assertEqual({'added': 1, 'removed': 1}, parsed_result) - - def test_POST_update_nodes_ignores_unknown_nodes(self): - tag = factory.make_tag() - self.become_admin() - unknown_add_system_id = generate_node_system_id() - unknown_remove_system_id = generate_node_system_id() - self.assertItemsEqual([], tag.node_set.all()) - response = self.client.post( - self.get_tag_uri(tag), { - 'op': 'update_nodes', - 'add': [unknown_add_system_id], - 'remove': [unknown_remove_system_id], - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertItemsEqual([], tag.node_set.all()) - self.assertEqual({'added': 0, 'removed': 0}, parsed_result) - - def test_POST_update_nodes_doesnt_require_add_or_remove(self): - tag = factory.make_tag() - node 
= factory.make_node() - self.become_admin() - self.assertItemsEqual([], tag.node_set.all()) - response = self.client.post( - self.get_tag_uri(tag), { - 'op': 'update_nodes', - 'add': [node.system_id], - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual({'added': 1, 'removed': 0}, parsed_result) - response = self.client.post( - self.get_tag_uri(tag), { - 'op': 'update_nodes', - 'remove': [node.system_id], - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual({'added': 0, 'removed': 1}, parsed_result) - - def test_POST_update_nodes_rejects_normal_user(self): - tag = factory.make_tag() - node = factory.make_node() - response = self.client.post( - self.get_tag_uri(tag), { - 'op': 'update_nodes', - 'add': [node.system_id], - }) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - self.assertItemsEqual([], tag.node_set.all()) - - def test_POST_update_nodes_allows_nodegroup_worker(self): - tag = factory.make_tag() - nodegroup = factory.make_node_group() - node = factory.make_node(nodegroup=nodegroup) - client = make_worker_client(nodegroup) - response = client.post( - self.get_tag_uri(tag), { - 'op': 'update_nodes', - 'add': [node.system_id], - 'nodegroup': nodegroup.uuid, - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual({'added': 1, 'removed': 0}, parsed_result) - self.assertItemsEqual([node], tag.node_set.all()) - - def test_POST_update_nodes_refuses_unidentified_nodegroup_worker(self): - tag = factory.make_tag() - nodegroup = factory.make_node_group() - node = factory.make_node(nodegroup=nodegroup) - client = make_worker_client(nodegroup) - # We don't pass nodegroup:uuid so we get refused - response = client.post( - self.get_tag_uri(tag), { - 'op': 'update_nodes', - 'add': [node.system_id], - }) - self.assertEqual(httplib.FORBIDDEN, 
response.status_code) - self.assertItemsEqual([], tag.node_set.all()) - - def test_POST_update_nodes_refuses_non_nodegroup_worker(self): - tag = factory.make_tag() - nodegroup = factory.make_node_group() - node = factory.make_node(nodegroup=nodegroup) - response = self.client.post( - self.get_tag_uri(tag), { - 'op': 'update_nodes', - 'add': [node.system_id], - 'nodegroup': nodegroup.uuid, - }) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - self.assertItemsEqual([], tag.node_set.all()) - - def test_POST_update_nodes_doesnt_modify_other_nodegroup_nodes(self): - tag = factory.make_tag() - nodegroup_mine = factory.make_node_group() - nodegroup_theirs = factory.make_node_group() - node_theirs = factory.make_node(nodegroup=nodegroup_theirs) - client = make_worker_client(nodegroup_mine) - response = client.post( - self.get_tag_uri(tag), { - 'op': 'update_nodes', - 'add': [node_theirs.system_id], - 'nodegroup': nodegroup_mine.uuid, - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual({'added': 0, 'removed': 0}, parsed_result) - self.assertItemsEqual([], tag.node_set.all()) - - def test_POST_update_nodes_ignores_incorrect_definition(self): - tag = factory.make_tag() - orig_def = tag.definition - nodegroup = factory.make_node_group() - node = factory.make_node(nodegroup=nodegroup) - client = make_worker_client(nodegroup) - tag.definition = '//new/node/definition' - tag.save() - response = client.post( - self.get_tag_uri(tag), { - 'op': 'update_nodes', - 'add': [node.system_id], - 'nodegroup': nodegroup.uuid, - 'definition': orig_def, - }) - self.assertEqual(httplib.CONFLICT, response.status_code) - self.assertItemsEqual([], tag.node_set.all()) - self.assertItemsEqual([], node.tags.all()) - - def test_POST_rebuild_rebuilds_node_mapping(self): - tag = factory.make_tag(definition='//foo/bar') - # Only one node matches the tag definition, rebuilding should notice - node_matching = 
factory.make_node() - inject_lshw_result(node_matching, b'') - node_bogus = factory.make_node() - inject_lshw_result(node_bogus, b'') - node_matching.tags.add(tag) - node_bogus.tags.add(tag) - self.assertItemsEqual( - [node_matching, node_bogus], tag.node_set.all()) - self.become_admin() - response = self.client.post(self.get_tag_uri(tag), {'op': 'rebuild'}) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual({'rebuilding': tag.name}, parsed_result) - self.assertItemsEqual([node_matching], tag.node_set.all()) - - def test_POST_rebuild_leaves_manual_tags(self): - tag = factory.make_tag(definition='') - node = factory.make_node() - node.tags.add(tag) - self.assertItemsEqual([node], tag.node_set.all()) - self.become_admin() - response = self.client.post(self.get_tag_uri(tag), {'op': 'rebuild'}) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual({'rebuilding': tag.name}, parsed_result) - self.assertItemsEqual([node], tag.node_set.all()) - - def test_POST_rebuild_unknown_404(self): - self.become_admin() - response = self.client.post( - reverse('tag_handler', args=['unknown-tag']), - {'op': 'rebuild'}) - self.assertEqual(httplib.NOT_FOUND, response.status_code) - - def test_POST_rebuild_requires_admin(self): - tag = factory.make_tag(definition='/foo/bar') - response = self.client.post( - self.get_tag_uri(tag), {'op': 'rebuild'}) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - -class TestTagsAPI(APITestCase): - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/tags/', reverse('tags_handler')) - - def test_GET_list_without_tags_returns_empty_list(self): - response = self.client.get(reverse('tags_handler'), {'op': 'list'}) - self.assertItemsEqual([], json.loads(response.content)) - - def test_POST_new_refuses_non_admin(self): - name = factory.getRandomString() - response = self.client.post( - 
reverse('tags_handler'), - { - 'op': 'new', - 'name': name, - 'comment': factory.getRandomString(), - 'definition': factory.getRandomString(), - }) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - self.assertFalse(Tag.objects.filter(name=name).exists()) - - def test_POST_new_creates_tag(self): - self.become_admin() - name = factory.getRandomString() - definition = '//node' - comment = factory.getRandomString() - response = self.client.post( - reverse('tags_handler'), - { - 'op': 'new', - 'name': name, - 'comment': comment, - 'definition': definition, - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual(name, parsed_result['name']) - self.assertEqual(comment, parsed_result['comment']) - self.assertEqual(definition, parsed_result['definition']) - self.assertTrue(Tag.objects.filter(name=name).exists()) - - def test_POST_new_without_definition_creates_tag(self): - self.become_admin() - name = factory.getRandomString() - comment = factory.getRandomString() - response = self.client.post( - reverse('tags_handler'), - { - 'op': 'new', - 'name': name, - 'comment': comment, - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual(name, parsed_result['name']) - self.assertEqual(comment, parsed_result['comment']) - self.assertEqual("", parsed_result['definition']) - self.assertTrue(Tag.objects.filter(name=name).exists()) - - def test_POST_new_invalid_tag_name(self): - self.become_admin() - # We do not check the full possible set of invalid names here, a more - # thorough check is done in test_tag, we just check that we get a - # reasonable error here. 
- invalid = 'invalid:name' - definition = '//node' - comment = factory.getRandomString() - response = self.client.post( - reverse('tags_handler'), - { - 'op': 'new', - 'name': invalid, - 'comment': comment, - 'definition': definition, - }) - self.assertEqual( - httplib.BAD_REQUEST, response.status_code, - 'We did not get BAD_REQUEST for an invalid tag name: %r' - % (invalid,)) - self.assertFalse(Tag.objects.filter(name=invalid).exists()) - - def test_POST_new_kernel_opts(self): - self.become_admin() - name = factory.getRandomString() - definition = '//node' - comment = factory.getRandomString() - extra_kernel_opts = factory.getRandomString() - response = self.client.post( - reverse('tags_handler'), - { - 'op': 'new', - 'name': name, - 'comment': comment, - 'definition': definition, - 'kernel_opts': extra_kernel_opts, - }) - self.assertEqual(httplib.OK, response.status_code) - parsed_result = json.loads(response.content) - self.assertEqual(name, parsed_result['name']) - self.assertEqual(comment, parsed_result['comment']) - self.assertEqual(definition, parsed_result['definition']) - self.assertEqual(extra_kernel_opts, parsed_result['kernel_opts']) - self.assertEqual( - extra_kernel_opts, Tag.objects.filter(name=name)[0].kernel_opts) - - def test_POST_new_populates_nodes(self): - self.become_admin() - node1 = factory.make_node() - inject_lshw_result(node1, b'') - # Create another node that doesn't have a 'child' - node2 = factory.make_node() - inject_lshw_result(node2, b'') - self.assertItemsEqual([], node1.tag_names()) - self.assertItemsEqual([], node2.tag_names()) - name = factory.getRandomString() - definition = '//node/child' - comment = factory.getRandomString() - response = self.client.post( - reverse('tags_handler'), - { - 'op': 'new', - 'name': name, - 'comment': comment, - 'definition': definition, - }) - self.assertEqual(httplib.OK, response.status_code) - self.assertItemsEqual([name], node1.tag_names()) - self.assertItemsEqual([], node2.tag_names()) diff 
-Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_user.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_user.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_user.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_user.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,166 +0,0 @@ -# Copyright 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for the user accounts API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -import json - -from django.contrib.auth.models import User -from django.core.urlresolvers import reverse -from maasserver.testing.api import APITestCase -from maasserver.testing.factory import factory -from testtools.matchers import ContainsAll - - -class TestUsers(APITestCase): - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/users/', reverse('users_handler')) - - def test_POST_creates_user(self): - self.become_admin() - username = factory.make_name('user') - email = factory.getRandomEmail() - password = factory.getRandomString() - - response = self.client.post( - reverse('users_handler'), - { - 'username': username, - 'email': email, - 'password': password, - 'is_superuser': '0', - }) - self.assertEqual(httplib.OK, response.status_code, response.content) - - self.assertEqual(username, json.loads(response.content)['username']) - created_user = User.objects.get(username=username) - self.assertEqual( - (email, False), - (created_user.email, created_user.is_superuser)) - - def test_POST_creates_admin(self): - self.become_admin() - username = factory.make_name('user') - email = factory.getRandomEmail() - password = factory.getRandomString() - - response = self.client.post( - reverse('users_handler'), - { - 'username': username, - 'email': email, - 'password': password, - 'is_superuser': '1', 
- }) - self.assertEqual(httplib.OK, response.status_code, response.content) - - self.assertEqual(username, json.loads(response.content)['username']) - created_user = User.objects.get(username=username) - self.assertEqual( - (email, True), - (created_user.email, created_user.is_superuser)) - - def test_POST_requires_admin(self): - response = self.client.post( - reverse('users_handler'), - { - 'username': factory.make_name('user'), - 'email': factory.getRandomEmail(), - 'password': factory.getRandomString(), - 'is_superuser': '1' if factory.getRandomBoolean() else '0', - }) - self.assertEqual( - httplib.FORBIDDEN, response.status_code, response.content) - - def test_GET_lists_users(self): - users = [factory.make_user() for counter in range(2)] - - response = self.client.get(reverse('users_handler')) - self.assertEqual(httplib.OK, response.status_code, response.content) - - listing = json.loads(response.content) - self.assertThat( - [user['username'] for user in listing], - ContainsAll([user.username for user in users])) - - def test_GET_orders_by_name(self): - # Create some users. Give them lower-case names, because collation - # algorithms may differ on how mixed-case names should be sorted. - # The implementation may sort in the database or in Python code, and - # the two may use different collations. - users = [factory.make_name('user').lower() for counter in range(5)] - for user in users: - factory.make_user(username=user) - - response = self.client.get(reverse('users_handler')) - self.assertEqual(httplib.OK, response.status_code, response.content) - - listing = json.loads(response.content) - # The listing may also contain built-in users and/or a test user. - # Restrict it to the users we created ourselves. 
- users_as_returned = [ - user['username'] for user in listing if user['username'] in users - ] - self.assertEqual(sorted(users), users_as_returned) - - -class TestUser(APITestCase): - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/users/username/', - reverse('user_handler', args=['username'])) - - def test_GET_finds_user(self): - user = factory.make_user() - - response = self.client.get( - reverse('user_handler', args=[user.username])) - self.assertEqual(httplib.OK, response.status_code, response.content) - - returned_user = json.loads(response.content) - self.assertEqual(user.username, returned_user['username']) - self.assertEqual(user.email, returned_user['email']) - self.assertFalse(returned_user['is_superuser']) - - def test_GET_shows_expected_fields(self): - user = factory.make_user() - - response = self.client.get( - reverse('user_handler', args=[user.username])) - self.assertEqual(httplib.OK, response.status_code, response.content) - - returned_user = json.loads(response.content) - self.assertItemsEqual( - ['username', 'email', 'is_superuser'], - returned_user.keys()) - - def test_GET_identifies_superuser_as_such(self): - user = factory.make_admin() - - response = self.client.get( - reverse('user_handler', args=[user.username])) - self.assertEqual(httplib.OK, response.status_code, response.content) - - self.assertTrue(json.loads(response.content)['is_superuser']) - - def test_GET_returns_404_if_user_not_found(self): - nonuser = factory.make_name('nonuser') - response = self.client.get(reverse('user_handler', args=[nonuser])) - self.assertEqual( - httplib.NOT_FOUND, response.status_code, response.status_code) - self.assertItemsEqual([], User.objects.filter(username=nonuser)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_utils.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_utils.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_utils.py 2014-09-03 14:18:31.000000000 +0000 +++ 
maas-1.7.6+bzr3376/src/maasserver/tests/test_api_utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,153 +0,0 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for API helpers.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from collections import namedtuple - -from django.http import QueryDict -from maasserver.api_utils import ( - extract_bool, - extract_oauth_key, - extract_oauth_key_from_auth_header, - get_oauth_token, - get_overridden_query_dict, - ) -from maasserver.exceptions import Unauthorized -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase - - -class TestExtractBool(MAASServerTestCase): - def test_asserts_against_raw_bytes(self): - self.assertRaises(AssertionError, extract_bool, b'0') - - def test_asserts_against_None(self): - self.assertRaises(AssertionError, extract_bool, None) - - def test_asserts_against_number(self): - self.assertRaises(AssertionError, extract_bool, 0) - - def test_0_means_False(self): - self.assertEquals(extract_bool('0'), False) - - def test_1_means_True(self): - self.assertEquals(extract_bool('1'), True) - - def test_rejects_other_numeric_strings(self): - self.assertRaises(ValueError, extract_bool, '00') - self.assertRaises(ValueError, extract_bool, '2') - self.assertRaises(ValueError, extract_bool, '-1') - - def test_rejects_empty_string(self): - self.assertRaises(ValueError, extract_bool, '') - - -class TestGetOverridedQueryDict(MAASServerTestCase): - - def test_returns_QueryDict(self): - fields = [factory.make_name('field')] - defaults = {fields[0]: factory.make_name('field')} - results = get_overridden_query_dict(defaults, QueryDict(''), fields) - expected_results = QueryDict('').copy() - expected_results.update(defaults) - self.assertEqual(expected_results, 
results) - - def test_data_values_override_defaults(self): - key = factory.make_name('key') - defaults = {key: factory.make_name('key')} - data_value = factory.make_name('value') - data = {key: data_value} - results = get_overridden_query_dict(defaults, data, [key]) - self.assertEqual([data_value], results.getlist(key)) - - def test_takes_multiple_values_in_default_parameters(self): - values = [factory.make_name('value') for i in range(2)] - key = factory.make_name('key') - defaults = {key: values} - results = get_overridden_query_dict(defaults, {}, [key]) - self.assertEqual(values, results.getlist(key)) - - def test_querydict_data_values_override_defaults(self): - key = factory.make_name('key') - defaults = {key: factory.make_name('name')} - data_values = [factory.make_name('value') for i in range(2)] - data = QueryDict('').copy() - data.setlist(key, data_values) - results = get_overridden_query_dict(defaults, data, [key]) - self.assertEqual(data_values, results.getlist(key)) - - def test_fields_filter_results(self): - key1 = factory.getRandomString() - key2 = factory.getRandomString() - defaults = { - key1: factory.getRandomString(), - key2: factory.getRandomString(), - } - data_value1 = factory.getRandomString() - data_value2 = factory.getRandomString() - data = {key1: data_value1, key2: data_value2} - results = get_overridden_query_dict(defaults, data, [key1]) - self.assertEqual([data_value2], results.getlist(key2)) - - -class TestOAuthHelpers(MAASServerTestCase): - - def make_fake_request(self, auth_header): - """Create a very simple fake request, with just an auth header.""" - FakeRequest = namedtuple('FakeRequest', ['META']) - return FakeRequest(META={'HTTP_AUTHORIZATION': auth_header}) - - def test_extract_oauth_key_from_auth_header_returns_key(self): - token = factory.getRandomString(18) - self.assertEqual( - token, - extract_oauth_key_from_auth_header( - factory.make_oauth_header(oauth_token=token))) - - def 
test_extract_oauth_key_from_auth_header_returns_None_if_missing(self): - self.assertIs(None, extract_oauth_key_from_auth_header('')) - - def test_extract_oauth_key_raises_Unauthorized_if_no_auth_header(self): - self.assertRaises( - Unauthorized, - extract_oauth_key, self.make_fake_request(None)) - - def test_extract_oauth_key_raises_Unauthorized_if_no_key(self): - self.assertRaises( - Unauthorized, - extract_oauth_key, self.make_fake_request('')) - - def test_extract_oauth_key_returns_key(self): - token = factory.getRandomString(18) - self.assertEqual( - token, - extract_oauth_key(self.make_fake_request( - factory.make_oauth_header(oauth_token=token)))) - - def test_get_oauth_token_finds_token(self): - user = factory.make_user() - consumer, token = user.get_profile().create_authorisation_token() - self.assertEqual( - token, - get_oauth_token( - self.make_fake_request( - factory.make_oauth_header(oauth_token=token.key)))) - - def test_get_oauth_token_raises_Unauthorized_for_unknown_token(self): - fake_token = factory.getRandomString(18) - header = factory.make_oauth_header(oauth_token=fake_token) - self.assertRaises( - Unauthorized, - get_oauth_token, self.make_fake_request(header)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_version.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_version.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_version.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_version.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Test maasserver API version.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - - -import httplib -import json - -from django.core.urlresolvers import reverse -from maasserver.api import API_CAPABILITIES_LIST -from maasserver.testing.testcase import MAASServerTestCase - - -class TestFindingResources(MAASServerTestCase): - """Tests for /version/ API.""" - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/version/', reverse('version')) - - def test_GET_returns_details(self): - response = self.client.get(reverse('version')) - self.assertEqual(httplib.OK, response.status_code) - - parsed_result = json.loads(response.content) - self.assertEqual( - { - 'capabilities': API_CAPABILITIES_LIST, - }, - parsed_result) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_zone.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_zone.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_zone.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_zone.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,167 +0,0 @@ -# Copyright 2013-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for physical `Zone` API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -import json - -from django.core.urlresolvers import reverse -from maasserver.models import Zone -from maasserver.models.zone import DEFAULT_ZONE_NAME -from maasserver.testing import reload_object -from maasserver.testing.api import APITestCase -from maasserver.testing.factory import factory - - -def get_zone_uri(zone): - """Return a zone's URI on the API.""" - return reverse('zone_handler', args=[zone.name]) - - -class TestZoneAPI(APITestCase): - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/zones/name/', reverse('zone_handler', args=['name'])) - - def test_POST_is_prohibited(self): - self.become_admin() - zone = factory.make_zone() - response = self.client.post( - get_zone_uri(zone), - { - 'name': zone.name, - 'description': zone.description, - }) - self.assertEqual(httplib.METHOD_NOT_ALLOWED, response.status_code) - - def test_GET_returns_zone(self): - zone = factory.make_zone() - response = self.client.get(get_zone_uri(zone)) - self.assertEqual(httplib.OK, response.status_code) - returned_zone = json.loads(response.content) - self.assertEqual( - (zone.name, zone.description), - (returned_zone['name'], returned_zone['description'])) - - def test_PUT_updates_zone(self): - self.become_admin() - zone = factory.make_zone() - new_description = factory.getRandomString() - - response = self.client_put( - get_zone_uri(zone), - {'description': new_description}) - self.assertEqual(httplib.OK, response.status_code) - - zone = reload_object(zone) - self.assertEqual(new_description, zone.description) - - def test_PUT_requires_admin(self): - zone = factory.make_zone() - response = self.client_put( - get_zone_uri(zone), - {'description': factory.getRandomString()}) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_PUT_updates_zone_name(self): 
- self.become_admin() - zone = factory.make_zone() - new_name = factory.make_name('zone-new') - - response = self.client_put(get_zone_uri(zone), {'name': new_name}) - self.assertEqual(httplib.OK, response.status_code) - - zone = reload_object(zone) - self.assertEqual(new_name, zone.name) - - def test_PUT_rejects_change_of_default_zone_name(self): - self.become_admin() - zone = Zone.objects.get_default_zone() - - response = self.client_put( - get_zone_uri(zone), - {'name': factory.make_name('zone')}) - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - zone = reload_object(zone) - self.assertEqual(DEFAULT_ZONE_NAME, zone.name) - - def test_PUT_changing_name_maintains_foreign_keys(self): - self.become_admin() - zone = factory.make_zone() - node = factory.make_node(zone=zone) - - response = self.client_put( - get_zone_uri(zone), - {'name': factory.make_name('new')}) - self.assertEqual(httplib.OK, response.status_code) - - node = reload_object(node) - zone = reload_object(zone) - self.assertEqual(zone, node.zone) - - def test_DELETE_removes_zone(self): - self.become_admin() - zone = factory.make_zone() - response = self.client.delete(get_zone_uri(zone)) - self.assertEqual(httplib.NO_CONTENT, response.status_code) - self.assertIsNone(reload_object(zone)) - - def test_DELETE_rejects_deletion_of_default_zone(self): - self.become_admin() - response = self.client.delete( - get_zone_uri(Zone.objects.get_default_zone())) - self.assertEqual(httplib.BAD_REQUEST, response.status_code) - self.assertIsNotNone(Zone.objects.get_default_zone()) - - def test_DELETE_requires_admin(self): - zone = factory.make_zone() - response = self.client.delete(get_zone_uri(zone)) - self.assertEqual(httplib.FORBIDDEN, response.status_code) - - def test_DELETE_cannot_delete_default_zone(self): - self.become_admin() - zone = Zone.objects.get_default_zone() - - response = self.client.delete(get_zone_uri(zone)) - - self.assertEqual( - ( - httplib.BAD_REQUEST, - "This zone is the default zone, 
it cannot be deleted.", - ), - (response.status_code, response.content)) - - def test_DELETE_sets_foreign_keys_to_default(self): - default_zone = Zone.objects.get_default_zone() - self.become_admin() - zone = factory.make_zone() - node = factory.make_node(zone=zone) - - response = self.client.delete(get_zone_uri(zone)) - self.assertEqual(httplib.NO_CONTENT, response.status_code) - - node = reload_object(node) - self.assertIsNotNone(node) - self.assertEquals(default_zone, node.zone) - - def test_DELETE_is_idempotent(self): - self.become_admin() - zone = factory.make_zone() - response = self.client.delete(get_zone_uri(zone)) - self.assertEqual(httplib.NO_CONTENT, response.status_code) - - response = self.client.delete(get_zone_uri(zone)) - self.assertEqual(httplib.NO_CONTENT, response.status_code) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_api_zones.py maas-1.7.6+bzr3376/src/maasserver/tests/test_api_zones.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_api_zones.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_api_zones.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -# Copyright 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for physical `Zone` API.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -import json - -from django.core.urlresolvers import reverse -from maasserver.models import Zone -from maasserver.testing.api import APITestCase -from maasserver.testing.factory import factory - - -class TestZonesAPI(APITestCase): - - def test_handler_path(self): - self.assertEqual( - '/api/1.0/zones/', reverse('zones_handler')) - - def test_new_creates_zone(self): - self.become_admin() - name = factory.make_name('name') - description = factory.make_name('description') - response = self.client.post( - reverse('zones_handler'), - { - 'name': name, - 'description': description, - }) - self.assertEqual(httplib.OK, response.status_code, response.content) - zones = Zone.objects.filter(name=name) - self.assertItemsEqual( - [(name, description)], - [(zone.name, zone.description) for zone in zones]) - - def test_new_requires_admin(self): - name = factory.make_name('name') - description = factory.make_name('description') - response = self.client.post( - reverse('zones_handler'), - { - 'name': name, - 'description': description, - }) - self.assertEqual( - httplib.FORBIDDEN, response.status_code, response.content) - - def test_list_returns_zone_list(self): - [factory.make_zone(sortable_name=True) for i in range(3)] - zones = Zone.objects.all() - response = self.client.get( - reverse('zones_handler'), - {}) - self.assertEqual(httplib.OK, response.status_code, response.content) - parsed_result = json.loads(response.content) - self.assertItemsEqual( - [( - zone.name, - zone.description, - reverse('zone_handler', args=[zone.name])) - for zone in zones], - [( - zone.get('name'), - zone.get('description'), - zone.get('resource_uri')) - for zone in parsed_result]) - - def test_list_returns_sorted_zone_list(self): - [factory.make_zone(sortable_name=True) for i in range(10)] - zones = 
Zone.objects.all() - response = self.client.get( - reverse('zones_handler'), - {}) - self.assertEqual(httplib.OK, response.status_code, response.content) - parsed_result = json.loads(response.content) - # Sorting is case-insensitive. - self.assertEqual( - sorted( - [ - zone.name - for zone in zones - ], key=lambda s: s.lower()), - [zone.get('name') for zone in parsed_result]) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_auth.py maas-1.7.6+bzr3376/src/maasserver/tests/test_auth.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_auth.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_auth.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). from __future__ import ( @@ -34,11 +34,11 @@ def make_user(self, name='test', password='test'): """Create a user with a password.""" - return factory.make_user(username=name, password=password) + return factory.make_User(username=name, password=password) def test_login(self): - name = factory.getRandomString() - password = factory.getRandomString() + name = factory.make_string() + password = factory.make_string() user = self.make_user(name, password) response = self.client.post( reverse('login'), {'username': name, 'password': password}) @@ -49,17 +49,17 @@ def test_login_failed(self): response = self.client.post( reverse('login'), { - 'username': factory.getRandomString(), - 'password': factory.getRandomString(), + 'username': factory.make_string(), + 'password': factory.make_string(), }) self.assertEqual(httplib.OK, response.status_code) self.assertNotIn('_auth_user_id', self.client.session) def test_logout(self): - name = factory.getRandomString() - password = factory.getRandomString() - factory.make_user(name, password) + name = factory.make_string() + password = 
factory.make_string() + factory.make_User(name, password) self.client.login(username=name, password=password) self.client.post(reverse('logout')) @@ -68,14 +68,14 @@ def make_unallocated_node(): """Return a node that is not allocated to anyone.""" - return factory.make_node() + return factory.make_Node() def make_allocated_node(owner=None): """Create a node, owned by `owner` (or create owner if not given).""" if owner is None: - owner = factory.make_user() - return factory.make_node(owner=owner, status=NODE_STATUS.ALLOCATED) + owner = factory.make_User() + return factory.make_Node(owner=owner, status=NODE_STATUS.ALLOCATED) class TestMAASAuthorizationBackend(MAASServerTestCase): @@ -102,13 +102,13 @@ def test_user_can_view_unowned_node(self): backend = MAASAuthorizationBackend() self.assertTrue(backend.has_perm( - factory.make_user(), NODE_PERMISSION.VIEW, + factory.make_User(), NODE_PERMISSION.VIEW, make_unallocated_node())) def test_user_can_view_nodes_owned_by_others(self): backend = MAASAuthorizationBackend() self.assertTrue(backend.has_perm( - factory.make_user(), NODE_PERMISSION.VIEW, make_allocated_node())) + factory.make_User(), NODE_PERMISSION.VIEW, make_allocated_node())) def test_owned_status(self): # A non-admin user can access nodes he owns. 
@@ -121,17 +121,17 @@ def test_user_cannot_edit_nodes_owned_by_others(self): backend = MAASAuthorizationBackend() self.assertFalse(backend.has_perm( - factory.make_user(), NODE_PERMISSION.EDIT, make_allocated_node())) + factory.make_User(), NODE_PERMISSION.EDIT, make_allocated_node())) def test_user_cannot_edit_unowned_node(self): backend = MAASAuthorizationBackend() self.assertFalse(backend.has_perm( - factory.make_user(), NODE_PERMISSION.EDIT, + factory.make_User(), NODE_PERMISSION.EDIT, make_unallocated_node())) def test_user_can_edit_his_own_nodes(self): backend = MAASAuthorizationBackend() - user = factory.make_user() + user = factory.make_User() self.assertTrue(backend.has_perm( user, NODE_PERMISSION.EDIT, make_allocated_node(owner=user))) @@ -139,10 +139,10 @@ # NODE_PERMISSION.ADMIN permission on nodes is granted to super users # only. backend = MAASAuthorizationBackend() - user = factory.make_user() + user = factory.make_User() self.assertFalse( backend.has_perm( - user, NODE_PERMISSION.ADMIN, factory.make_node())) + user, NODE_PERMISSION.ADMIN, factory.make_Node())) class TestNodeVisibility(MAASServerTestCase): @@ -158,7 +158,7 @@ factory.make_admin(), NODE_PERMISSION.VIEW)) def test_user_sees_own_nodes_and_unowned_nodes(self): - user = factory.make_user() + user = factory.make_User() make_allocated_node() own_node = make_allocated_node(owner=user) unowned_node = make_unallocated_node() diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_bootresources.py maas-1.7.6+bzr3376/src/maasserver/tests/test_bootresources.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_bootresources.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_bootresources.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,1229 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Test maasserver.bootresources.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import httplib +import json +import os +from os import environ +from random import randint +from StringIO import StringIO +from subprocess import CalledProcessError + +from django.core.urlresolvers import reverse +from django.db import ( + connections, + transaction, + ) +from django.http import StreamingHttpResponse +from django.test.client import Client +from fixtures import Fixture +from maasserver import bootresources +from maasserver.bootresources import ( + BootResourceStore, + download_all_boot_resources, + download_boot_resources, + get_simplestream_endpoint, + ) +from maasserver.enum import ( + BOOT_RESOURCE_FILE_TYPE, + BOOT_RESOURCE_TYPE, + ) +from maasserver.models import ( + BootResource, + BootResourceFile, + BootResourceSet, + Config, + LargeFile, + ) +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.testcase import MAASServerTestCase +from maasserver.utils import absolute_reverse +from maasserver.utils.orm import get_one +from maastesting.djangotestcase import TransactionTestCase +from maastesting.matchers import ( + MockCalledOnceWith, + MockNotCalled, + ) +from maastesting.testcase import MAASTestCase +from mock import ( + ANY, + MagicMock, + Mock, + sentinel, + ) +from provisioningserver.auth import get_maas_user_gpghome +from provisioningserver.import_images.product_mapping import ProductMapping +from provisioningserver.rpc.testing import TwistedLoggerFixture +from testtools.deferredruntest import extract_result +from testtools.matchers import ContainsAll +from twisted.application.internet import TimerService +from twisted.internet.defer import fail + + +def make_boot_resource_file_with_stream(): + resource = factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.SYNCED) + rfile = 
resource.sets.first().files.first() + with rfile.largefile.content.open('rb') as stream: + content = stream.read() + with rfile.largefile.content.open('wb') as stream: + stream.truncate() + return rfile, StringIO(content), content + + +class TestHelpers(MAASServerTestCase): + """Tests for `maasserver.bootresources` helpers.""" + + def test_get_simplestreams_endpoint(self): + endpoint = get_simplestream_endpoint() + self.assertEqual( + absolute_reverse( + 'simplestreams_stream_handler', + kwargs={'filename': 'index.json'}), + endpoint['url']) + self.assertEqual([], endpoint['selections']) + + +class SimplestreamsEnvFixture(Fixture): + """Clears the env variables set by the methods that interact with + simplestreams.""" + + def setUp(self): + super(SimplestreamsEnvFixture, self).setUp() + prior_env = {} + for key in ['GNUPGHOME', 'http_proxy', 'https_proxy']: + prior_env[key] = os.environ.get(key, '') + self.addCleanup(os.environ.update, prior_env) + + +class TestSimpleStreamsHandler(MAASServerTestCase): + """Tests for `maasserver.bootresources.SimpleStreamsHandler`.""" + + def reverse_stream_handler(self, filename): + return reverse( + 'simplestreams_stream_handler', kwargs={'filename': filename}) + + def reverse_file_handler( + self, os, arch, subarch, series, version, filename): + return reverse( + 'simplestreams_file_handler', kwargs={ + 'os': os, + 'arch': arch, + 'subarch': subarch, + 'series': series, + 'version': version, + 'filename': filename, + }) + + def get_stream_client(self, filename): + return self.client.get(self.reverse_stream_handler(filename)) + + def get_file_client(self, os, arch, subarch, series, version, filename): + return self.client.get( + self.reverse_file_handler( + os, arch, subarch, series, version, filename)) + + def get_product_name_for_resource(self, resource): + arch, subarch = resource.architecture.split('/') + if resource.rtype == BOOT_RESOURCE_TYPE.UPLOADED: + os = 'custom' + series = resource.name + else: + os, series = 
resource.name.split('/') + return 'maas:boot:%s:%s:%s:%s' % (os, arch, subarch, series) + + def make_usable_product_boot_resource(self): + resource = factory.make_usable_boot_resource() + return self.get_product_name_for_resource(resource), resource + + def test_streams_other_than_allowed_returns_404(self): + allowed_paths = [ + 'index.json', + 'maas:v2:download.json', + ] + invalid_paths = [ + '%s.json' % factory.make_name('path') + for _ in range(3) + ] + for path in allowed_paths: + response = self.get_stream_client(path) + self.assertEqual(httplib.OK, response.status_code) + for path in invalid_paths: + response = self.get_stream_client(path) + self.assertEqual(httplib.NOT_FOUND, response.status_code) + + def test_streams_product_index_contains_keys(self): + response = self.get_stream_client('index.json') + output = json.loads(response.content) + self.assertThat(output, ContainsAll(['index', 'updated', 'format'])) + + def test_streams_product_index_format_is_index_1(self): + response = self.get_stream_client('index.json') + output = json.loads(response.content) + self.assertEqual('index:1.0', output['format']) + + def test_streams_product_index_index_has_maas_v2_download(self): + response = self.get_stream_client('index.json') + output = json.loads(response.content) + self.assertThat(output['index'], ContainsAll(['maas:v2:download'])) + + def test_streams_product_index_maas_v2_download_contains_keys(self): + response = self.get_stream_client('index.json') + output = json.loads(response.content) + self.assertThat( + output['index']['maas:v2:download'], + ContainsAll([ + 'datatype', 'path', 'updated', 'products', 'format'])) + + def test_streams_product_index_maas_v2_download_has_valid_values(self): + response = self.get_stream_client('index.json') + output = json.loads(response.content) + self.assertEqual( + 'image-downloads', + output['index']['maas:v2:download']['datatype']) + self.assertEqual( + 'streams/v1/maas:v2:download.json', + 
output['index']['maas:v2:download']['path']) + self.assertEqual( + 'products:1.0', + output['index']['maas:v2:download']['format']) + + def test_streams_product_index_empty_products(self): + response = self.get_stream_client('index.json') + output = json.loads(response.content) + self.assertEqual( + [], + output['index']['maas:v2:download']['products']) + + def test_streams_product_index_empty_with_incomplete_resource(self): + resource = factory.make_BootResource() + factory.make_BootResourceSet(resource) + response = self.get_stream_client('index.json') + output = json.loads(response.content) + self.assertEqual( + [], + output['index']['maas:v2:download']['products']) + + def test_streams_product_index_with_resources(self): + products = [] + for _ in range(3): + product, _ = self.make_usable_product_boot_resource() + products.append(product) + response = self.get_stream_client('index.json') + output = json.loads(response.content) + # Product listing should be the same as all of the completed + # boot resources in the database. 
+ self.assertItemsEqual( + products, + output['index']['maas:v2:download']['products']) + + def test_streams_product_download_contains_keys(self): + response = self.get_stream_client('maas:v2:download.json') + output = json.loads(response.content) + self.assertThat(output, ContainsAll([ + 'datatype', 'updated', 'content_id', 'products', 'format'])) + + def test_streams_product_download_has_valid_values(self): + response = self.get_stream_client('maas:v2:download.json') + output = json.loads(response.content) + self.assertEqual('image-downloads', output['datatype']) + self.assertEqual('maas:v2:download', output['content_id']) + self.assertEqual('products:1.0', output['format']) + + def test_streams_product_download_empty_products(self): + response = self.get_stream_client('maas:v2:download.json') + output = json.loads(response.content) + self.assertEqual( + {}, + output['products']) + + def test_streams_product_download_empty_with_incomplete_resource(self): + resource = factory.make_BootResource() + factory.make_BootResourceSet(resource) + response = self.get_stream_client('maas:v2:download.json') + output = json.loads(response.content) + self.assertEqual( + {}, + output['products']) + + def test_streams_product_download_has_valid_product_keys(self): + products = [] + for _ in range(3): + product, _ = self.make_usable_product_boot_resource() + products.append(product) + response = self.get_stream_client('maas:v2:download.json') + output = json.loads(response.content) + # Product listing should be the same as all of the completed + # boot resources in the database. 
+ self.assertThat( + output['products'], + ContainsAll(products)) + + def test_streams_product_download_product_contains_keys(self): + product, _ = self.make_usable_product_boot_resource() + response = self.get_stream_client('maas:v2:download.json') + output = json.loads(response.content) + self.assertThat( + output['products'][product], + ContainsAll([ + 'versions', 'subarch', 'label', 'version', + 'arch', 'release', 'krel', 'os'])) + + def test_streams_product_download_product_has_valid_values(self): + product, resource = self.make_usable_product_boot_resource() + _, _, os, arch, subarch, series = product.split(':') + label = resource.get_latest_complete_set().label + response = self.get_stream_client('maas:v2:download.json') + output = json.loads(response.content) + output_product = output['products'][product] + self.assertEqual(subarch, output_product['subarch']) + self.assertEqual(label, output_product['label']) + self.assertEqual(series, output_product['version']) + self.assertEqual(arch, output_product['arch']) + self.assertEqual(series, output_product['release']) + self.assertEqual(series, output_product['krel']) + self.assertEqual(os, output_product['os']) + for key, value in resource.extra.items(): + self.assertEqual(value, output_product[key]) + + def test_streams_product_download_product_uses_latest_complete_label(self): + product, resource = self.make_usable_product_boot_resource() + # Incomplete resource_set + factory.make_BootResourceSet(resource) + newest_set = factory.make_BootResourceSet(resource) + factory.make_boot_resource_file_with_content(newest_set) + response = self.get_stream_client('maas:v2:download.json') + output = json.loads(response.content) + output_product = output['products'][product] + self.assertEqual(newest_set.label, output_product['label']) + + def test_streams_product_download_product_contains_multiple_versions(self): + resource = factory.make_BootResource() + resource_sets = [ + factory.make_BootResourceSet(resource) + for _ 
in range(3) + ] + versions = [] + for resource_set in resource_sets: + factory.make_boot_resource_file_with_content(resource_set) + versions.append(resource_set.version) + product = self.get_product_name_for_resource(resource) + response = self.get_stream_client('maas:v2:download.json') + output = json.loads(response.content) + self.assertThat( + output['products'][product]['versions'], + ContainsAll(versions)) + + def test_streams_product_download_product_version_contains_items(self): + product, resource = self.make_usable_product_boot_resource() + resource_set = resource.get_latest_complete_set() + items = [ + rfile.filename + for rfile in resource_set.files.all() + ] + response = self.get_stream_client('maas:v2:download.json') + output = json.loads(response.content) + version = output['products'][product]['versions'][resource_set.version] + self.assertThat( + version['items'], + ContainsAll(items)) + + def test_streams_product_download_product_item_contains_keys(self): + product, resource = self.make_usable_product_boot_resource() + resource_set = resource.get_latest_complete_set() + resource_file = resource_set.files.order_by('?')[0] + response = self.get_stream_client('maas:v2:download.json') + output = json.loads(response.content) + version = output['products'][product]['versions'][resource_set.version] + self.assertThat( + version['items'][resource_file.filename], + ContainsAll(['path', 'ftype', 'sha256', 'size'])) + + def test_streams_product_download_product_item_has_valid_values(self): + product, resource = self.make_usable_product_boot_resource() + _, _, os, arch, subarch, series = product.split(':') + resource_set = resource.get_latest_complete_set() + resource_file = resource_set.files.order_by('?')[0] + path = '%s/%s/%s/%s/%s/%s' % ( + os, arch, subarch, series, resource_set.version, + resource_file.filename) + response = self.get_stream_client('maas:v2:download.json') + output = json.loads(response.content) + version = 
output['products'][product]['versions'][resource_set.version] + item = version['items'][resource_file.filename] + self.assertEqual(path, item['path']) + self.assertEqual(resource_file.filetype, item['ftype']) + self.assertEqual(resource_file.largefile.sha256, item['sha256']) + self.assertEqual(resource_file.largefile.total_size, item['size']) + for key, value in resource_file.extra.items(): + self.assertEqual(value, item[key]) + + def test_download_invalid_boot_resource_returns_404(self): + os = factory.make_name('os') + series = factory.make_name('series') + arch = factory.make_name('arch') + subarch = factory.make_name('subarch') + version = factory.make_name('version') + filename = factory.make_name('filename') + response = self.get_file_client( + os, arch, subarch, series, version, filename) + self.assertEqual(httplib.NOT_FOUND, response.status_code) + + def test_download_invalid_version_returns_404(self): + product, resource = self.make_usable_product_boot_resource() + _, _, os, arch, subarch, series = product.split(':') + version = factory.make_name('version') + filename = factory.make_name('filename') + response = self.get_file_client( + os, arch, subarch, series, version, filename) + self.assertEqual(httplib.NOT_FOUND, response.status_code) + + def test_download_invalid_filename_returns_404(self): + product, resource = self.make_usable_product_boot_resource() + _, _, os, arch, subarch, series = product.split(':') + resource_set = resource.get_latest_complete_set() + version = resource_set.version + filename = factory.make_name('filename') + response = self.get_file_client( + os, arch, subarch, series, version, filename) + self.assertEqual(httplib.NOT_FOUND, response.status_code) + + def test_download_valid_path_returns_200(self): + product, resource = self.make_usable_product_boot_resource() + _, _, os, arch, subarch, series = product.split(':') + resource_set = resource.get_latest_complete_set() + version = resource_set.version + resource_file = 
resource_set.files.order_by('?')[0] + filename = resource_file.filename + response = self.get_file_client( + os, arch, subarch, series, version, filename) + self.assertEqual(httplib.OK, response.status_code) + + def test_download_returns_streaming_response(self): + product, resource = self.make_usable_product_boot_resource() + _, _, os, arch, subarch, series = product.split(':') + resource_set = resource.get_latest_complete_set() + version = resource_set.version + resource_file = resource_set.files.order_by('?')[0] + filename = resource_file.filename + response = self.get_file_client( + os, arch, subarch, series, version, filename) + self.assertIsInstance(response, StreamingHttpResponse) + + +class TestConnectionWrapper(TransactionTestCase): + """Tests the use of StreamingHttpResponse(ConnectionWrapper(stream)). + + We do not run this inside of `MAASServerTestCase` as that wraps a + transaction around each test. Since a new connection is created to return + the actual content, the transaction to create the data needs be committed. + """ + + def make_file_for_client(self): + # Set up the database information inside of a transaction. This is + # done so the information is committed. As the new connection needs + # to be able to access the data. + with transaction.atomic(): + os = factory.make_name('os') + series = factory.make_name('series') + arch = factory.make_name('arch') + subarch = factory.make_name('subarch') + name = '%s/%s' % (os, series) + architecture = '%s/%s' % (arch, subarch) + version = factory.make_name('version') + filetype = factory.pick_enum(BOOT_RESOURCE_FILE_TYPE) + # We set the filename to the same value as filetype, as in most + # cases this will always be true. The simplestreams content from + # maas.ubuntu.com, is formatted this way. 
+ filename = filetype + size = randint(1024, 2048) + content = factory.make_string(size=size) + resource = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, + architecture=architecture) + resource_set = factory.make_BootResourceSet( + resource, version=version) + largefile = factory.make_LargeFile(content=content, size=size) + factory.make_BootResourceFile( + resource_set, largefile, filename=filename, filetype=filetype) + return content, reverse( + 'simplestreams_file_handler', kwargs={ + 'os': os, + 'arch': arch, + 'subarch': subarch, + 'series': series, + 'version': version, + 'filename': filename, + }) + + def read_response(self, response): + """Read the streaming_content from the response. + + :rtype: bytes + """ + return b''.join(response.streaming_content) + + def test_download_calls__get_new_connection(self): + content, url = self.make_file_for_client() + mock_get_new_connection = self.patch( + bootresources.ConnectionWrapper, '_get_new_connection') + + client = Client() + response = client.get(url) + self.read_response(response) + self.assertThat(mock_get_new_connection, MockCalledOnceWith()) + + def test_download_connection_is_not_same_as_django_connections(self): + content, url = self.make_file_for_client() + + class AssertConnectionWrapper(bootresources.ConnectionWrapper): + + def _set_up(self): + super(AssertConnectionWrapper, self)._set_up() + # Capture the created connection + AssertConnectionWrapper.connection = self._connection + + def close(self): + # Close the stream, but we don't want to close the + # connection as the test is testing that the connection is + # not the same as the connection django is using for other + # webrequests. 
+ if self._stream is not None: + self._stream.close() + self._stream = None + self._connection = None + + self.patch( + bootresources, 'ConnectionWrapper', AssertConnectionWrapper) + + client = Client() + response = client.get(url) + self.read_response(response) + + # Add cleanup to close the connection, since this was removed from + # AssertConnectionWrapper.close method. + def close(): + conn = AssertConnectionWrapper.connection + conn.commit() + conn.leave_transaction_management() + conn.close() + self.addCleanup(close) + + # The connection that is used by the wrapper cannot be the same as the + # connection be using for all other webrequests. Without this + # seperate the transactional middleware will fail to initialize, + # because the the connection will already be in a transaction. + # + # Note: cannot test if DatabaseWrapper != DatabaseWrapper, as it will + # report true, because the __eq__ operator only checks if the aliases + # are the same. This is checking the underlying connection is + # different, which is the important part. 
+ self.assertNotEqual( + connections["default"].connection, + AssertConnectionWrapper.connection.connection) + + +def make_product(): + """Make product dictionary that is just like the one provided + from simplsetreams.""" + subarch = factory.make_name('subarch') + subarches = [factory.make_name('subarch') for _ in range(3)] + subarches.insert(0, subarch) + subarches = ','.join(subarches) + product = { + 'os': factory.make_name('os'), + 'arch': factory.make_name('arch'), + 'subarch': subarch, + 'release': factory.make_name('release'), + 'kflavor': factory.make_name('kflavor'), + 'subarches': subarches, + 'version_name': factory.make_name('version'), + 'label': factory.make_name('label'), + 'ftype': factory.pick_enum(BOOT_RESOURCE_FILE_TYPE), + 'kpackage': factory.make_name('kpackage'), + 'di_version': factory.make_name('di_version'), + } + name = '%s/%s' % (product['os'], product['release']) + architecture = '%s/%s' % (product['arch'], product['subarch']) + return name, architecture, product + + +def make_boot_resource_group( + rtype=None, name=None, architecture=None, + version=None, filename=None, filetype=None): + """Make boot resource that contains one set and one file.""" + resource = factory.make_BootResource( + rtype=rtype, name=name, architecture=architecture) + resource_set = factory.make_BootResourceSet(resource, version=version) + rfile = factory.make_boot_resource_file_with_content( + resource_set, filename=filename, filetype=filetype) + return resource, resource_set, rfile + + +def make_boot_resource_group_from_product(product): + """Make boot resource that contains one set and one file, using the + information from the given product. + + The product dictionary is also updated to include the sha256 and size + for the created largefile. The calling function should use the returned + product in place of the passed product. 
+ """ + name = '%s/%s' % (product['os'], product['release']) + architecture = '%s/%s' % (product['arch'], product['subarch']) + resource = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, + architecture=architecture) + resource_set = factory.make_BootResourceSet( + resource, version=product['version_name']) + rfile = factory.make_boot_resource_file_with_content( + resource_set, filename=product['ftype'], + filetype=product['ftype']) + product['sha256'] = rfile.largefile.sha256 + product['size'] = rfile.largefile.total_size + return product, resource + + +class TestBootResourceStore(MAASServerTestCase): + + def make_boot_resources(self): + resources = [ + factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.SYNCED) + for _ in range(3) + ] + resource_names = [] + for resource in resources: + os, series = resource.name.split('/') + arch, subarch = resource.split_arch() + name = '%s/%s/%s/%s' % (os, arch, subarch, series) + resource_names.append(name) + return resources, resource_names + + def test_init_initializes_variables(self): + _, resource_names = self.make_boot_resources() + store = BootResourceStore() + self.assertItemsEqual(resource_names, store._resources_to_delete) + self.assertEqual({}, store._content_to_finalize) + + def test_prevent_resource_deletion_removes_resource(self): + resources, resource_names = self.make_boot_resources() + store = BootResourceStore() + resource = resources.pop() + resource_names.pop() + store.prevent_resource_deletion(resource) + self.assertItemsEqual(resource_names, store._resources_to_delete) + + def test_prevent_resource_deletion_doesnt_remove_unknown_resource(self): + resources, resource_names = self.make_boot_resources() + store = BootResourceStore() + resource = factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.SYNCED) + store.prevent_resource_deletion(resource) + self.assertItemsEqual(resource_names, store._resources_to_delete) + + def test_save_content_later_adds_to__content_to_finalize_var(self): + 
_, _, rfile = make_boot_resource_group() + store = BootResourceStore() + store.save_content_later(rfile, sentinel.reader) + self.assertEqual( + {rfile.id: sentinel.reader}, + store._content_to_finalize) + + def test_get_or_create_boot_resource_creates_resource(self): + name, architecture, product = make_product() + store = BootResourceStore() + resource = store.get_or_create_boot_resource(product) + self.assertEqual(BOOT_RESOURCE_TYPE.SYNCED, resource.rtype) + self.assertEqual(name, resource.name) + self.assertEqual(architecture, resource.architecture) + self.assertEqual(product['kflavor'], resource.extra['kflavor']) + self.assertEqual(product['subarches'], resource.extra['subarches']) + + def test_get_or_create_boot_resource_gets_resource(self): + name, architecture, product = make_product() + expected = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, + architecture=architecture) + store = BootResourceStore() + resource = store.get_or_create_boot_resource(product) + self.assertEqual(expected, resource) + self.assertEqual(product['kflavor'], resource.extra['kflavor']) + self.assertEqual(product['subarches'], resource.extra['subarches']) + + def test_get_or_create_boot_resource_calls_prevent_resource_deletion(self): + name, architecture, product = make_product() + resource = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, + name=name, architecture=architecture) + store = BootResourceStore() + mock_prevent = self.patch(store, 'prevent_resource_deletion') + store.get_or_create_boot_resource(product) + self.assertThat( + mock_prevent, MockCalledOnceWith(resource)) + + def test_get_or_create_boot_resource_converts_generated_into_synced(self): + name, architecture, product = make_product() + resource = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.GENERATED, + name=name, architecture=architecture) + store = BootResourceStore() + mock_prevent = self.patch(store, 'prevent_resource_deletion') + 
store.get_or_create_boot_resource(product) + self.assertEqual( + BOOT_RESOURCE_TYPE.SYNCED, + reload_object(resource).rtype) + self.assertThat( + mock_prevent, MockNotCalled()) + + def test_get_or_create_boot_resource_set_creates_resource_set(self): + name, architecture, product = make_product() + product, resource = make_boot_resource_group_from_product(product) + resource.sets.all().delete() + store = BootResourceStore() + resource_set = store.get_or_create_boot_resource_set(resource, product) + self.assertEqual(product['version_name'], resource_set.version) + self.assertEqual(product['label'], resource_set.label) + + def test_get_or_create_boot_resource_set_gets_resource_set(self): + name, architecture, product = make_product() + product, resource = make_boot_resource_group_from_product(product) + expected = resource.sets.first() + store = BootResourceStore() + resource_set = store.get_or_create_boot_resource_set(resource, product) + self.assertEqual(expected, resource_set) + self.assertEqual(product['label'], resource_set.label) + + def test_get_or_create_boot_resource_file_creates_resource_file(self): + name, architecture, product = make_product() + product, resource = make_boot_resource_group_from_product(product) + resource_set = resource.sets.first() + resource_set.files.all().delete() + store = BootResourceStore() + rfile = store.get_or_create_boot_resource_file(resource_set, product) + self.assertEqual(product['ftype'], rfile.filename) + self.assertEqual(product['ftype'], rfile.filetype) + self.assertEqual(product['kpackage'], rfile.extra['kpackage']) + self.assertEqual(product['di_version'], rfile.extra['di_version']) + + def test_get_or_create_boot_resource_file_gets_resource_file(self): + name, architecture, product = make_product() + product, resource = make_boot_resource_group_from_product(product) + resource_set = resource.sets.first() + expected = resource_set.files.first() + store = BootResourceStore() + rfile = 
store.get_or_create_boot_resource_file(resource_set, product) + self.assertEqual(expected, rfile) + self.assertEqual(product['ftype'], rfile.filetype) + self.assertEqual(product['kpackage'], rfile.extra['kpackage']) + self.assertEqual(product['di_version'], rfile.extra['di_version']) + + def test_get_resource_file_log_identifier_returns_valid_ident(self): + os = factory.make_name('os') + series = factory.make_name('series') + arch = factory.make_name('arch') + subarch = factory.make_name('subarch') + version = factory.make_name('version') + filename = factory.make_name('filename') + name = '%s/%s' % (os, series) + architecture = '%s/%s' % (arch, subarch) + resource = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, + architecture=architecture) + resource_set = factory.make_BootResourceSet( + resource, version=version) + rfile = factory.make_boot_resource_file_with_content( + resource_set, filename=filename) + store = BootResourceStore() + self.assertEqual( + '%s/%s/%s/%s/%s/%s' % ( + os, arch, subarch, series, version, filename), + store.get_resource_file_log_identifier(rfile)) + self.assertEqual( + '%s/%s/%s/%s/%s/%s' % ( + os, arch, subarch, series, version, filename), + store.get_resource_file_log_identifier( + rfile, resource_set, resource)) + + def test_write_content_saves_data(self): + rfile, reader, content = make_boot_resource_file_with_stream() + store = BootResourceStore() + store.write_content(rfile, reader) + self.assertTrue(BootResourceFile.objects.filter(id=rfile.id).exists()) + with rfile.largefile.content.open('rb') as stream: + written_data = stream.read() + self.assertEqual(content, written_data) + + def test_write_content_deletes_file_on_bad_checksum(self): + rfile, _, _ = make_boot_resource_file_with_stream() + reader = StringIO(factory.make_string()) + store = BootResourceStore() + store.write_content(rfile, reader) + self.assertFalse(BootResourceFile.objects.filter(id=rfile.id).exists()) + + def 
test_finalize_does_nothing_if_resources_to_delete_hasnt_changed(self): + factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.SYNCED) + store = BootResourceStore() + mock_resource_cleaner = self.patch(store, 'resource_cleaner') + mock_perform_write = self.patch(store, 'perform_write') + mock_resource_set_cleaner = self.patch(store, 'resource_set_cleaner') + store.finalize() + self.expectThat(mock_resource_cleaner, MockNotCalled()) + self.expectThat(mock_perform_write, MockNotCalled()) + self.expectThat(mock_resource_set_cleaner, MockNotCalled()) + + def test_finalize_calls_methods_if_new_resources_need_to_be_saved(self): + factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.SYNCED) + store = BootResourceStore() + store._content_to_finalize = [sentinel.content] + mock_resource_cleaner = self.patch(store, 'resource_cleaner') + mock_perform_write = self.patch(store, 'perform_write') + mock_resource_set_cleaner = self.patch(store, 'resource_set_cleaner') + store.finalize() + self.expectThat(mock_resource_cleaner, MockCalledOnceWith()) + self.expectThat(mock_perform_write, MockCalledOnceWith()) + self.expectThat(mock_resource_set_cleaner, MockCalledOnceWith()) + + def test_finalize_calls_methods_if_resources_to_delete_has_changed(self): + factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.SYNCED) + store = BootResourceStore() + store._resources_to_delete = set() + mock_resource_cleaner = self.patch(store, 'resource_cleaner') + mock_perform_write = self.patch(store, 'perform_write') + mock_resource_set_cleaner = self.patch(store, 'resource_set_cleaner') + store.finalize() + self.expectThat(mock_resource_cleaner, MockCalledOnceWith()) + self.expectThat(mock_perform_write, MockCalledOnceWith()) + self.expectThat(mock_resource_set_cleaner, MockCalledOnceWith()) + + +class TestBootResourceTransactional(TransactionTestCase): + """Test methods on `BootResourceStore` that manage their own transactions. 
+ + This is done using TransactionTestCase so the database is flushed after + each test run. + """ + + def test_insert_does_nothing_if_file_already_exists(self): + name, architecture, product = make_product() + with transaction.atomic(): + product, resource = make_boot_resource_group_from_product(product) + rfile = resource.sets.first().files.first() + largefile = rfile.largefile + store = BootResourceStore() + mock_save_later = self.patch(store, 'save_content_later') + store.insert(product, sentinel.reader) + self.assertEqual(largefile, reload_object(rfile).largefile) + self.assertThat(mock_save_later, MockNotCalled()) + + def test_insert_uses_already_existing_largefile(self): + name, architecture, product = make_product() + with transaction.atomic(): + product, resource = make_boot_resource_group_from_product(product) + resource_set = resource.sets.first() + resource_set.files.all().delete() + largefile = factory.make_LargeFile() + product['sha256'] = largefile.sha256 + product['size'] = largefile.total_size + store = BootResourceStore() + mock_save_later = self.patch(store, 'save_content_later') + store.insert(product, sentinel.reader) + self.assertEqual( + largefile, + get_one(reload_object(resource_set).files.all()).largefile) + self.assertThat(mock_save_later, MockNotCalled()) + + def test_insert_deletes_mismatch_largefile(self): + name, architecture, product = make_product() + with transaction.atomic(): + product, resource = make_boot_resource_group_from_product(product) + rfile = resource.sets.first().files.first() + delete_largefile = rfile.largefile + largefile = factory.make_LargeFile() + product['sha256'] = largefile.sha256 + product['size'] = largefile.total_size + store = BootResourceStore() + mock_save_later = self.patch(store, 'save_content_later') + store.insert(product, sentinel.reader) + self.assertFalse( + LargeFile.objects.filter(id=delete_largefile.id).exists()) + self.assertEqual(largefile, reload_object(rfile).largefile) + 
self.assertThat(mock_save_later, MockNotCalled()) + + def test_insert_deletes_mismatch_largefile_keeps_other_resource_file(self): + name, architecture, product = make_product() + with transaction.atomic(): + resource = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, + architecture=architecture) + resource_set = factory.make_BootResourceSet( + resource, version=product['version_name']) + other_type = factory.pick_enum( + BOOT_RESOURCE_FILE_TYPE, but_not=product['ftype']) + other_file = factory.make_boot_resource_file_with_content( + resource_set, filename=other_type, filetype=other_type) + rfile = factory.make_BootResourceFile( + resource_set, other_file.largefile, + filename=product['ftype'], filetype=product['ftype']) + largefile = factory.make_LargeFile() + product['sha256'] = largefile.sha256 + product['size'] = largefile.total_size + store = BootResourceStore() + mock_save_later = self.patch(store, 'save_content_later') + store.insert(product, sentinel.reader) + self.assertEqual(largefile, reload_object(rfile).largefile) + self.assertTrue( + LargeFile.objects.filter(id=other_file.largefile.id).exists()) + self.assertTrue( + BootResourceFile.objects.filter(id=other_file.id).exists()) + self.assertEqual( + other_file.largefile, reload_object(other_file).largefile) + self.assertThat(mock_save_later, MockNotCalled()) + + def test_insert_creates_new_largefile(self): + name, architecture, product = make_product() + with transaction.atomic(): + resource = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, + architecture=architecture) + resource_set = factory.make_BootResourceSet( + resource, version=product['version_name']) + product['sha256'] = factory.make_string(size=64) + product['size'] = randint(1024, 2048) + store = BootResourceStore() + mock_save_later = self.patch(store, 'save_content_later') + store.insert(product, sentinel.reader) + rfile = get_one(reload_object(resource_set).files.all()) + 
self.assertEqual(product['sha256'], rfile.largefile.sha256) + self.assertEqual(product['size'], rfile.largefile.total_size) + self.assertThat( + mock_save_later, + MockCalledOnceWith(rfile, sentinel.reader)) + + def test_resource_cleaner_removes_old_boot_resources(self): + with transaction.atomic(): + resources = [ + factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.SYNCED) + for _ in range(3) + ] + store = BootResourceStore() + store.resource_cleaner() + for resource in resources: + os, series = resource.name.split('/') + arch, subarch = resource.split_arch() + self.assertFalse( + BootResource.objects.has_synced_resource( + os, arch, subarch, series)) + + def test_resource_set_cleaner_removes_incomplete_set(self): + with transaction.atomic(): + resource = factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.SYNCED) + incomplete_set = factory.make_BootResourceSet(resource) + store = BootResourceStore() + store.resource_set_cleaner() + self.assertFalse( + BootResourceSet.objects.filter(id=incomplete_set.id).exists()) + + def test_resource_set_cleaner_keeps_only_newest_completed_set(self): + with transaction.atomic(): + resource = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.SYNCED) + old_complete_sets = [] + for _ in range(3): + resource_set = factory.make_BootResourceSet(resource) + factory.make_boot_resource_file_with_content(resource_set) + old_complete_sets.append(resource_set) + newest_set = factory.make_BootResourceSet(resource) + factory.make_boot_resource_file_with_content(newest_set) + store = BootResourceStore() + store.resource_set_cleaner() + self.assertItemsEqual([newest_set], resource.sets.all()) + for resource_set in old_complete_sets: + self.assertFalse( + BootResourceSet.objects.filter(id=resource_set.id).exists()) + + def test_resource_set_cleaner_removes_resources_with_empty_sets(self): + with transaction.atomic(): + resource = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.SYNCED) + store = BootResourceStore() + 
store.resource_set_cleaner() + self.assertFalse( + BootResource.objects.filter(id=resource.id).exists()) + + def test_perform_writes_writes_all_content(self): + with transaction.atomic(): + files = [make_boot_resource_file_with_stream() for _ in range(3)] + store = BootResourceStore() + for rfile, reader, content in files: + store.save_content_later(rfile, reader) + store.perform_write() + with transaction.atomic(): + for rfile, reader, content in files: + self.assertTrue( + BootResourceFile.objects.filter(id=rfile.id).exists()) + with rfile.largefile.content.open('rb') as stream: + written_data = stream.read() + self.assertEqual(content, written_data) + + +class TestImportImages(MAASTestCase): + + def setUp(self): + super(TestImportImages, self).setUp() + self.useFixture(SimplestreamsEnvFixture()) + + def patch_and_capture_env_for_download_all_boot_resources(self): + class CaptureEnv: + """Fake function; records a copy of the environment.""" + + def __call__(self, *args, **kwargs): + self.args = args + self.env = environ.copy() + + capture = self.patch( + bootresources, 'download_all_boot_resources', CaptureEnv()) + return capture + + def test_download_boot_resources_syncs_repo(self): + fake_sync = self.patch(bootresources.BootResourceRepoWriter, 'sync') + store = BootResourceStore() + source_url = factory.make_url() + download_boot_resources( + source_url, store, None, None) + self.assertEqual(1, len(fake_sync.mock_calls)) + + def test_download_all_boot_resources_calls_download_boot_resources(self): + source = { + 'url': factory.make_url(), + 'keyring': self.make_file("keyring"), + } + product_mapping = ProductMapping() + store = BootResourceStore() + fake_download = self.patch(bootresources, 'download_boot_resources') + download_all_boot_resources( + sources=[source], product_mapping=product_mapping, store=store) + self.assertThat( + fake_download, + MockCalledOnceWith( + source['url'], store, product_mapping, + keyring_file=source['keyring'])) + + def 
test_download_all_boot_resources_calls_finalize_on_store(self): + product_mapping = ProductMapping() + store = BootResourceStore() + fake_finalize = self.patch(store, 'finalize') + download_all_boot_resources( + sources=[], product_mapping=product_mapping, store=store) + self.assertThat( + fake_finalize, + MockCalledOnceWith()) + + def test_has_synced_resources_returns_true(self): + factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.SYNCED) + self.assertTrue(bootresources.has_synced_resources()) + + def test_has_synced_resources_returns_false(self): + factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.UPLOADED) + self.assertFalse(bootresources.has_synced_resources()) + + def test__import_resources_exits_early_if_lock_held(self): + has_synced_resources = self.patch_autospec( + bootresources, "has_synced_resources") + with transaction.atomic(): + with bootresources.locks.import_images: + bootresources._import_resources(force=True) + # The test for already-synced resources is not called if the + # lock is already held. + self.assertThat(has_synced_resources, MockNotCalled()) + + def test__import_resources_exits_early_without_force(self): + has_synced_resources = self.patch( + bootresources, "has_synced_resources") + bootresources._import_resources(force=False) + # The test for already-synced resources is performed if we're not + # forcing a sync. + self.assertThat(has_synced_resources, MockCalledOnceWith()) + + def test__import_resources_continues_with_force(self): + has_synced_resources = self.patch( + bootresources, "has_synced_resources") + bootresources._import_resources(force=True) + # The test for already-synced resources is not performed if we're + # forcing a sync. 
+ self.assertThat(has_synced_resources, MockNotCalled()) + + def test__import_resources_holds_lock(self): + fake_write_all_keyrings = self.patch( + bootresources, 'write_all_keyrings') + + def test_for_held_lock(directory, sources): + self.assertTrue(bootresources.locks.import_images.is_locked()) + return [] + fake_write_all_keyrings.side_effect = test_for_held_lock + + bootresources._import_resources(force=True) + self.assertFalse(bootresources.locks.import_images.is_locked()) + + def test__import_resources_calls_functions_with_correct_parameters(self): + cache_boot_sources = self.patch( + bootresources, 'cache_boot_sources') + write_all_keyrings = self.patch( + bootresources, 'write_all_keyrings') + write_all_keyrings.return_value = [sentinel.source] + image_descriptions = self.patch( + bootresources, 'download_all_image_descriptions') + descriptions = Mock() + descriptions.is_empty.return_value = False + image_descriptions.return_value = descriptions + map_products = self.patch( + bootresources, 'map_products') + map_products.return_value = sentinel.mapping + download_all_boot_resources = self.patch( + bootresources, 'download_all_boot_resources') + + bootresources._import_resources(force=True) + + self.expectThat( + cache_boot_sources, MockCalledOnceWith()) + self.expectThat( + write_all_keyrings, + MockCalledOnceWith(ANY, [])) + self.expectThat( + image_descriptions, + MockCalledOnceWith([sentinel.source])) + self.expectThat( + map_products, + MockCalledOnceWith(descriptions)) + self.expectThat( + download_all_boot_resources, + MockCalledOnceWith([sentinel.source], sentinel.mapping)) + + def test__import_resources_has_env_GNUPGHOME_set(self): + fake_image_descriptions = self.patch( + bootresources, 'download_all_image_descriptions') + descriptions = Mock() + descriptions.is_empty.return_value = False + fake_image_descriptions.return_value = descriptions + self.patch(bootresources, 'map_products') + capture = 
self.patch_and_capture_env_for_download_all_boot_resources() + + bootresources._import_resources(force=True) + self.assertEqual( + get_maas_user_gpghome(), capture.env['GNUPGHOME']) + + def test__import_resources_has_env_http_and_https_proxy_set(self): + proxy_address = factory.make_name('proxy') + Config.objects.set_config('http_proxy', proxy_address) + + fake_image_descriptions = self.patch( + bootresources, 'download_all_image_descriptions') + descriptions = Mock() + descriptions.is_empty.return_value = False + fake_image_descriptions.return_value = descriptions + self.patch(bootresources, 'map_products') + capture = self.patch_and_capture_env_for_download_all_boot_resources() + + bootresources._import_resources(force=True) + self.assertEqual( + (proxy_address, proxy_address), + (capture.env['http_proxy'], capture.env['https_proxy'])) + + def test__import_resources_calls_import_boot_images_on_clusters(self): + nodegroup = MagicMock() + self.patch(bootresources, 'NodeGroup', nodegroup) + + fake_image_descriptions = self.patch( + bootresources, 'download_all_image_descriptions') + descriptions = Mock() + descriptions.is_empty.return_value = False + fake_image_descriptions.return_value = descriptions + self.patch(bootresources, 'map_products') + self.patch(bootresources, 'download_all_boot_resources') + + bootresources._import_resources(force=True) + self.assertThat( + nodegroup.objects.import_boot_images_on_accepted_clusters, + MockCalledOnceWith()) + + +class TestImportResourcesInThread(MAASTestCase): + """Tests for `_import_resources_in_thread`.""" + + def test__defers__import_resources_to_thread(self): + deferToThread = self.patch(bootresources, "deferToThread") + bootresources._import_resources_in_thread(force=sentinel.force) + self.assertThat( + deferToThread, MockCalledOnceWith( + bootresources._import_resources, force=sentinel.force)) + + def tests__defaults_force_to_False(self): + deferToThread = self.patch(bootresources, "deferToThread") + 
bootresources._import_resources_in_thread() + self.assertThat( + deferToThread, MockCalledOnceWith( + bootresources._import_resources, force=False)) + + def test__logs_errors_and_does_not_errback(self): + logger = self.useFixture(TwistedLoggerFixture()) + exception_type = factory.make_exception_type() + deferToThread = self.patch(bootresources, "deferToThread") + deferToThread.return_value = fail(exception_type()) + d = bootresources._import_resources_in_thread(force=sentinel.force) + self.assertIsNone(extract_result(d)) + self.assertDocTestMatches( + """\ + Importing boot resources failed. + Traceback (most recent call last): + ... + """, + logger.output) + + def test__logs_subprocess_output_on_error(self): + logger = self.useFixture(TwistedLoggerFixture()) + exception = CalledProcessError( + 2, [factory.make_name("command")], + factory.make_name("output")) + deferToThread = self.patch(bootresources, "deferToThread") + deferToThread.return_value = fail(exception) + d = bootresources._import_resources_in_thread(force=sentinel.force) + self.assertIsNone(extract_result(d)) + self.assertDocTestMatches( + """\ + Importing boot resources failed. + Traceback (most recent call last): + Failure: subprocess.CalledProcessError: + Command `command-...` returned non-zero exit status 2: + output-... 
+ """, + logger.output) + + +class TestImportResourcesService(MAASTestCase): + """Tests for `ImportResourcesService`.""" + + def test__is_a_TimerService(self): + service = bootresources.ImportResourcesService() + self.assertIsInstance(service, TimerService) + + def test__runs_once_an_hour(self): + service = bootresources.ImportResourcesService() + self.assertEqual(3600, service.step) + + def test__calls__import_resources_in_thread(self): + service = bootresources.ImportResourcesService() + self.assertEqual( + (bootresources._import_resources_in_thread, (), {}), + service.call) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_bootsources.py maas-1.7.6+bzr3376/src/maasserver/tests/test_bootsources.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_bootsources.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_bootsources.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,275 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Test maasserver.bootsources.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from os import environ + +from maasserver import bootsources +from maasserver.bootsources import ( + cache_boot_sources, + cache_boot_sources_in_thread, + ensure_boot_source_definition, + get_boot_sources, + get_os_info_from_boot_sources, + ) +from maasserver.components import ( + get_persistent_error, + register_persistent_error, + ) +from maasserver.enum import COMPONENT +from maasserver.models import ( + BootSource, + BootSourceCache, + BootSourceSelection, + Config, + ) +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase +from maasserver.tests.test_bootresources import SimplestreamsEnvFixture +from maastesting.matchers import MockCalledOnceWith +from mock import MagicMock +from provisioningserver.import_images import ( + download_descriptions as download_descriptions_module, + ) +from provisioningserver.import_images.boot_image_mapping import ( + BootImageMapping, + ) +from provisioningserver.import_images.helpers import ImageSpec +from requests.exceptions import ConnectionError +from testtools.matchers import HasLength +from twisted.internet import reactor +from twisted.internet.threads import deferToThread + + +def patch_and_capture_env_for_download_all_image_descriptions(testcase): + class CaptureEnv: + """Fake function; records a copy of the environment.""" + + def __call__(self, *args, **kwargs): + self.args = args + self.env = environ.copy() + return MagicMock() + + capture = testcase.patch( + bootsources, 'download_all_image_descriptions', CaptureEnv()) + return capture + + +def make_image_spec( + os=None, arch=None, subarch=None, release=None, label=None): + if os is None: + os = factory.make_name('os') + if arch is None: + arch = factory.make_name('arch') + if subarch is None: + subarch = factory.make_name('subarch') + 
if release is None: + release = factory.make_name('release') + if label is None: + label = factory.make_name('label') + return ImageSpec( + os, + arch, + subarch, + release, + label, + ) + + +def make_boot_image_mapping(image_specs=[]): + mapping = BootImageMapping() + for image_spec in image_specs: + mapping.setdefault(image_spec, {}) + return mapping + + +class TestHelpers(MAASServerTestCase): + + def test_ensure_boot_source_definition_creates_default_source(self): + BootSource.objects.all().delete() + ensure_boot_source_definition() + sources = BootSource.objects.all() + self.assertThat(sources, HasLength(1)) + [source] = sources + self.assertAttributes( + source, + { + 'url': 'http://maas.ubuntu.com/images/ephemeral-v2/releases/', + 'keyring_filename': ( + '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg'), + }) + selections = BootSourceSelection.objects.filter(boot_source=source) + by_release = { + selection.release: selection + for selection in selections + } + self.assertItemsEqual(['trusty'], by_release.keys()) + self.assertAttributes( + by_release['trusty'], + { + 'release': 'trusty', + 'arches': ['amd64'], + 'subarches': ['*'], + 'labels': ['release'], + }) + + def test_ensure_boot_source_definition_skips_if_already_present(self): + sources = [ + factory.make_BootSource() + for _ in range(3) + ] + ensure_boot_source_definition() + self.assertItemsEqual(sources, BootSource.objects.all()) + + def test_get_boot_sources(self): + sources = [ + factory.make_BootSource( + keyring_data="data").to_dict() + for _ in range(3) + ] + self.assertItemsEqual(sources, get_boot_sources()) + + +class TestGetOSInfoFromBootSources(MAASServerTestCase): + + def test__returns_empty_sources_and_sets_when_cache_empty(self): + self.assertEqual( + ([], set(), set()), + get_os_info_from_boot_sources(factory.make_name('os'))) + + def test__returns_empty_sources_and_sets_when_no_os(self): + factory.make_BootSourceCache() + self.assertEqual( + ([], set(), set()), + 
get_os_info_from_boot_sources(factory.make_name('os'))) + + def test__returns_sources_and_sets_of_releases_and_architectures(self): + os = factory.make_name('os') + sources = [ + factory.make_BootSource(keyring_data='1234') for _ in range(2)] + releases = set() + arches = set() + for source in sources: + for _ in range(3): + release = factory.make_name('release') + arch = factory.make_name('arch') + factory.make_BootSourceCache( + source, os=os, release=release, arch=arch) + releases.add(release) + arches.add(arch) + self.assertEqual( + (sources, releases, arches), + get_os_info_from_boot_sources(os)) + + +class TestPrivateCacheBootSources(MAASServerTestCase): + + def setUp(self): + super(TestPrivateCacheBootSources, self).setUp() + self.useFixture(SimplestreamsEnvFixture()) + + def test__has_env_GNUPGHOME_set(self): + capture = ( + patch_and_capture_env_for_download_all_image_descriptions(self)) + factory.make_BootSource(keyring_data='1234') + cache_boot_sources() + self.assertEqual( + bootsources.get_maas_user_gpghome(), + capture.env['GNUPGHOME']) + + def test__has_env_http_and_https_proxy_set(self): + proxy_address = factory.make_name('proxy') + Config.objects.set_config('http_proxy', proxy_address) + capture = ( + patch_and_capture_env_for_download_all_image_descriptions(self)) + factory.make_BootSource(keyring_data='1234') + cache_boot_sources() + self.assertEqual( + (proxy_address, proxy_address), + (capture.env['http_proxy'], capture.env['https_proxy'])) + + def test__returns_clears_entire_cache(self): + source = factory.make_BootSource(keyring_data='1234') + factory.make_BootSourceCache(source) + mock_download = self.patch( + bootsources, 'download_all_image_descriptions') + mock_download.return_value = make_boot_image_mapping() + cache_boot_sources() + self.assertEqual(0, BootSourceCache.objects.all().count()) + + def test__returns_adds_entries_to_cache_for_source(self): + source = factory.make_BootSource(keyring_data='1234') + os = factory.make_name('os') 
+ releases = [factory.make_name('release') for _ in range(3)] + image_specs = [ + make_image_spec(os=os, release=release) for release in releases] + mock_download = self.patch( + bootsources, 'download_all_image_descriptions') + mock_download.return_value = make_boot_image_mapping(image_specs) + cache_boot_sources() + cached_releases = [ + cache.release + for cache in BootSourceCache.objects.filter(boot_source=source) + if cache.os == os + ] + self.assertItemsEqual(releases, cached_releases) + + +class TestBadConnectionHandling(MAASServerTestCase): + + def setUp(self): + super(TestBadConnectionHandling, self).setUp() + self.useFixture(SimplestreamsEnvFixture()) + + def test__catches_connection_errors_and_sets_component_error(self): + sources = [ + factory.make_BootSource(keyring_data='1234') for _ in range(3)] + download_image_descriptions = self.patch( + download_descriptions_module, 'download_image_descriptions') + error_text = factory.make_name("error_text") + # Make two of the downloads fail. + download_image_descriptions.side_effect = [ + ConnectionError(error_text), + BootImageMapping(), + IOError(error_text), + ] + cache_boot_sources() + base_error = "Failed to import images from boot source {url}: {err}" + error_part_one = base_error.format(url=sources[0].url, err=error_text) + error_part_two = base_error.format(url=sources[2].url, err=error_text) + expected_error = error_part_one + '\n' + error_part_two + actual_error = get_persistent_error(COMPONENT.REGION_IMAGE_IMPORT) + self.assertEqual(expected_error, actual_error) + + def test__clears_component_error_when_successful(self): + register_persistent_error( + COMPONENT.REGION_IMAGE_IMPORT, factory.make_string()) + [factory.make_BootSource(keyring_data='1234') for _ in range(3)] + download_image_descriptions = self.patch( + download_descriptions_module, 'download_image_descriptions') + # Make all of the downloads successful. 
+ download_image_descriptions.return_value = BootImageMapping() + cache_boot_sources() + self.assertIsNone(get_persistent_error(COMPONENT.REGION_IMAGE_IMPORT)) + + +class TestCacheBootSources(MAASServerTestCase): + + def test__calls_callLater_in_reactor(self): + mock_callLater = self.patch(reactor, 'callLater') + cache_boot_sources_in_thread() + self.assertThat( + mock_callLater, + MockCalledOnceWith(1, deferToThread, cache_boot_sources)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_commands_edit_named_options.py maas-1.7.6+bzr3376/src/maasserver/tests/test_commands_edit_named_options.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_commands_edit_named_options.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_commands_edit_named_options.py 2015-07-10 01:27:14.000000000 +0000 @@ -15,6 +15,7 @@ __all__ = [] from codecs import getwriter +from collections import OrderedDict from io import BytesIO import os import shutil @@ -22,11 +23,22 @@ from django.core.management import call_command from django.core.management.base import CommandError +from maasserver.management.commands.edit_named_options import ( + Command as command_module, + ) +from maasserver.models import Config from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase +from maasserver.utils import get_one from provisioningserver.dns.config import MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME +from provisioningserver.utils.isc import ( + make_isc_string, + parse_isc_string, + read_isc_file, + ) from testtools.matchers import ( Contains, + Equals, FileContains, Not, ) @@ -43,7 +55,27 @@ OPTIONS_FILE_WITH_FORWARDERS = textwrap.dedent("""\ options { directory "/var/cache/bind"; - forwarders { 192.168.1.1; }; + forwarders { 192.168.1.1; 192.168.1.2; }; + auth-nxdomain no; # conform to RFC1035 + listen-on-v6 { any; }; + }; +""") + +OPTIONS_FILE_WITH_FORWARDERS_AND_DNSSEC = textwrap.dedent("""\ + options { + directory 
"/var/cache/bind"; + forwarders { 192.168.1.1; 192.168.1.2; }; + dnssec-validation no; + auth-nxdomain no; # conform to RFC1035 + listen-on-v6 { any; }; + }; +""") + +OPTIONS_FILE_WITH_EXTRA_AND_DUP_FORWARDER = textwrap.dedent("""\ + options { + directory "/var/cache/bind"; + forwarders { 192.168.1.2; 192.168.1.3; }; + dnssec-validation no; auth-nxdomain no; # conform to RFC1035 listen-on-v6 { any; }; }; @@ -76,7 +108,7 @@ self.assertFailsWithMessage(absent_file, "does not exist") def test_exits_when_file_has_no_options_block(self): - content = factory.getRandomString() + content = factory.make_string() self.assertContentFailsWithMessage( content, "Can't find options {} block") @@ -90,18 +122,27 @@ self.assertContentFailsWithMessage( OPTIONS_FILE, "Failed to make a backup") - def test_removes_existing_forwarders_config(self): + def test_does_not_remove_existing_forwarders_config(self): options_file = self.make_file(contents=OPTIONS_FILE_WITH_FORWARDERS) call_command( "edit_named_options", config_path=options_file, stdout=self.stdout) + options = read_isc_file(options_file) + self.assertThat(make_isc_string(options), Contains('forwarders')) + + def test_removes_existing_forwarders_config_if_migrate_set(self): + options_file = self.make_file(contents=OPTIONS_FILE_WITH_FORWARDERS) + call_command( + "edit_named_options", config_path=options_file, + migrate_conflicting_options=True, stdout=self.stdout) + # Check that the file was re-written without forwarders (since # that's now in the included file). + options = read_isc_file(options_file) self.assertThat( - options_file, - Not(FileContains( - matcher=Contains('forwarders')))) + make_isc_string(options), + Not(Contains('forwarders'))) def test_normal_operation(self): options_file = self.make_file(contents=OPTIONS_FILE) @@ -114,11 +155,10 @@ MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) # Check that the file was re-written with the include statement. 
+ options = read_isc_file(options_file) self.assertThat( - options_file, - FileContains( - matcher=Contains( - 'include "%s";' % expected_path))) + make_isc_string(options), + Contains('include "%s";' % expected_path)) # Check that the backup was made. options_file_base = os.path.dirname(options_file) @@ -128,3 +168,95 @@ [backup_file] = files backup_file = os.path.join(options_file_base, backup_file) self.assertThat(backup_file, FileContains(OPTIONS_FILE)) + + def test_migrates_bind_config_to_database(self): + options_file = self.make_file( + contents=OPTIONS_FILE_WITH_FORWARDERS_AND_DNSSEC) + call_command( + "edit_named_options", config_path=options_file, + migrate_conflicting_options=True, stdout=self.stdout) + + upstream_dns = get_one(Config.objects.filter(name="upstream_dns")) + self.assertThat({'192.168.1.1', '192.168.1.2'}, + Equals(set(upstream_dns.value.split()))) + + def test_migrate_combines_with_existing_forwarders(self): + options_file = self.make_file( + contents=OPTIONS_FILE_WITH_FORWARDERS_AND_DNSSEC) + call_command( + "edit_named_options", config_path=options_file, + migrate_conflicting_options=True, stdout=self.stdout) + + upstream_dns = get_one(Config.objects.filter(name="upstream_dns")) + self.assertThat(OrderedDict.fromkeys(['192.168.1.1', '192.168.1.2']), + Equals(OrderedDict.fromkeys( + upstream_dns.value.split()))) + + options_file = self.make_file( + contents=OPTIONS_FILE_WITH_EXTRA_AND_DUP_FORWARDER) + + call_command( + "edit_named_options", config_path=options_file, + migrate_conflicting_options=True, stdout=self.stdout) + + upstream_dns = get_one(Config.objects.filter(name="upstream_dns")) + self.assertThat( + OrderedDict.fromkeys( + ['192.168.1.1', '192.168.1.2', '192.168.1.3']), + Equals(OrderedDict.fromkeys(upstream_dns.value.split()))) + + def test_dry_run_migrates_nothing_and_prints_config(self): + options_file = self.make_file( + contents=OPTIONS_FILE_WITH_FORWARDERS_AND_DNSSEC) + call_command( + "edit_named_options", 
config_path=options_file, + migrate_conflicting_options=True, dry_run=True, stdout=self.stdout) + + upstream_dns = get_one(Config.objects.filter(name="upstream_dns")) + self.assertIsNone(upstream_dns) + + # Check that a proper configuration was written to stdout. + config = parse_isc_string(self.stdout.getvalue()) + self.assertIsNotNone(config) + + def test_repeat_migrations_migrate_nothing(self): + options_file = self.make_file( + contents=OPTIONS_FILE_WITH_FORWARDERS_AND_DNSSEC) + backup_mock = self.patch(command_module, "back_up_existing_file") + + call_command( + "edit_named_options", config_path=options_file, + migrate_conflicting_options=True, stdout=self.stdout) + + self.assertTrue(backup_mock.called) + backup_mock.reset_mock() + + write_mock = self.patch(command_module, "write_new_named_conf_options") + + call_command( + "edit_named_options", config_path=options_file, + migrate_conflicting_options=True, stdout=self.stdout) + + self.assertFalse(backup_mock.called) + self.assertFalse(write_mock.called) + + def test_repeat_forced_migrations_write_file_anyway(self): + options_file = self.make_file( + contents=OPTIONS_FILE_WITH_FORWARDERS_AND_DNSSEC) + backup_mock = self.patch(command_module, "back_up_existing_file") + + call_command( + "edit_named_options", config_path=options_file, + migrate_conflicting_options=True, stdout=self.stdout) + + self.assertTrue(backup_mock.called) + backup_mock.reset_mock() + + write_mock = self.patch(command_module, "write_new_named_conf_options") + + call_command( + "edit_named_options", config_path=options_file, + migrate_conflicting_options=True, force=True, stdout=self.stdout) + + self.assertTrue(backup_mock.called) + self.assertTrue(write_mock.called) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_commands.py maas-1.7.6+bzr3376/src/maasserver/tests/test_commands.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_commands.py 2014-09-03 14:18:31.000000000 +0000 +++ 
maas-1.7.6+bzr3376/src/maasserver/tests/test_commands.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test custom commands, as found in src/maasserver/management/commands.""" @@ -20,7 +20,6 @@ from apiclient.creds import convert_tuple_to_string import django from django.contrib.auth.models import User -from django.core.cache import cache from django.core.management import call_command from django.core.management.base import CommandError from maasserver.management.commands import createadmin @@ -79,8 +78,8 @@ stderr = BytesIO() stdout = BytesIO() username = factory.make_name('user') - password = factory.getRandomString() - email = factory.getRandomEmail() + password = factory.make_string() + email = factory.make_email_address() self.patch(createadmin, 'prompt_for_password').return_value = password call_command( @@ -92,22 +91,46 @@ self.assertEquals('', stdout.getvalue().strip()) self.assertTrue(user.check_password(password)) - def test_createadmin_requires_email(self): - username = factory.getRandomString() - password = factory.getRandomString() - error_text = assertCommandErrors( - self, 'createadmin', - username=username, password=password) - self.assertIn( - "You must provide an email with --email.", - error_text) + def test_createadmin_prompts_for_username_if_not_given(self): + stderr = BytesIO() + stdout = BytesIO() + username = factory.make_name('user') + password = factory.make_string() + email = factory.make_email_address() + self.patch(createadmin, 'prompt_for_username').return_value = username + + call_command( + 'createadmin', password=password, email=email, stdout=stdout, + stderr=stderr) + user = User.objects.get(username=username) + + self.assertEquals('', stderr.getvalue().strip()) + self.assertEquals('', 
stdout.getvalue().strip()) + self.assertTrue(user.check_password(password)) + + def test_createadmin_prompts_for_email_if_not_given(self): + stderr = BytesIO() + stdout = BytesIO() + username = factory.make_name('user') + password = factory.make_string() + email = factory.make_email_address() + self.patch(createadmin, 'prompt_for_email').return_value = email + + call_command( + 'createadmin', username=username, password=password, stdout=stdout, + stderr=stderr) + user = User.objects.get(username=username) + + self.assertEquals('', stderr.getvalue().strip()) + self.assertEquals('', stderr.getvalue().strip()) + self.assertTrue(user.check_password(password)) def test_createadmin_creates_admin(self): stderr = BytesIO() stdout = BytesIO() - username = factory.getRandomString() - password = factory.getRandomString() - email = '%s@example.com' % factory.getRandomString() + username = factory.make_string() + password = factory.make_string() + email = '%s@example.com' % factory.make_string() call_command( 'createadmin', username=username, password=password, email=email, stderr=stderr, stdout=stdout) @@ -120,32 +143,43 @@ self.assertEqual(email, user.email) def test_prompt_for_password_returns_selected_password(self): - password = factory.getRandomString() + password = factory.make_string() self.patch(createadmin, 'getpass').return_value = password self.assertEqual(password, createadmin.prompt_for_password()) def test_prompt_for_password_checks_for_consistent_password(self): - self.patch(createadmin, 'getpass', lambda x: factory.getRandomString()) + self.patch(createadmin, 'getpass', lambda x: factory.make_string()) self.assertRaises( createadmin.InconsistentPassword, createadmin.prompt_for_password) - def test_clearcache_clears_entire_cache(self): - key = factory.getRandomString() - cache.set(key, factory.getRandomString()) - call_command('clearcache') - self.assertIsNone(cache.get(key, None)) - - def test_clearcache_clears_specific_key(self): - key = 
factory.getRandomString() - cache.set(key, factory.getRandomString()) - another_key = factory.getRandomString() - cache.set(another_key, factory.getRandomString()) - call_command('clearcache', key=key) - self.assertIsNone(cache.get(key, None)) - self.assertIsNotNone(cache.get(another_key, None)) + def test_prompt_for_username_returns_selected_username(self): + username = factory.make_name('user') + self.patch(createadmin, 'raw_input').return_value = username + + self.assertEqual(username, createadmin.prompt_for_username()) + + def test_prompt_for_username_checks_for_empty_username(self): + self.patch(createadmin, 'raw_input', lambda x: '') + + self.assertRaises( + createadmin.EmptyUsername, + createadmin.prompt_for_username) + + def test_prompt_for_email_returns_selected_email(self): + email = factory.make_email_address() + self.patch(createadmin, 'raw_input').return_value = email + + self.assertEqual(email, createadmin.prompt_for_email()) + + def test_prompt_for_email_checks_for_empty_email(self): + self.patch(createadmin, 'raw_input', lambda x: '') + + self.assertRaises( + createadmin.EmptyEmail, + createadmin.prompt_for_email) class TestApikeyCommand(DjangoTestCase): @@ -160,7 +194,7 @@ stderr = BytesIO() out = BytesIO() stdout = getwriter("UTF-8")(out) - user = factory.make_user() + user = factory.make_User() call_command( 'apikey', username=user.username, stderr=stderr, stdout=stdout) self.assertEqual('', stderr.getvalue().strip()) @@ -175,7 +209,7 @@ stderr = BytesIO() out = BytesIO() stdout = getwriter("UTF-8")(out) - user = factory.make_user() + user = factory.make_User() num_keys = len(user.get_profile().get_authorisation_tokens()) call_command( 'apikey', username=user.username, generate=True, stderr=stderr, @@ -192,7 +226,7 @@ def test_apikey_deletes_key(self): stderr = BytesIO() stdout = BytesIO() - user = factory.make_user() + user = factory.make_User() existing_token = get_one( user.get_profile().get_authorisation_tokens()) token_string = 
convert_tuple_to_string( @@ -206,7 +240,7 @@ self.assertEqual(0, len(keys_after)) def test_apikey_rejects_mutually_exclusive_options(self): - user = factory.make_user() + user = factory.make_User() error_text = assertCommandErrors( self, 'apikey', username=user.username, generate=True, delete="foo") @@ -215,7 +249,7 @@ error_text) def test_apikey_rejects_deletion_of_bad_key(self): - user = factory.make_user() + user = factory.make_User() error_text = assertCommandErrors( self, 'apikey', username=user.username, delete="foo") @@ -225,7 +259,7 @@ def test_api_key_rejects_deletion_of_nonexistent_key(self): stderr = BytesIO() - user = factory.make_user() + user = factory.make_User() existing_token = get_one( user.get_profile().get_authorisation_tokens()) token_string = convert_tuple_to_string( diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_commands_set_up_dns.py maas-1.7.6+bzr3376/src/maasserver/tests/test_commands_set_up_dns.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_commands_set_up_dns.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_commands_set_up_dns.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the get_named_conf command.""" @@ -17,7 +17,6 @@ import os -from celery.app import app_or_default from django.core.management import call_command from maasserver.testing.testcase import MAASServerTestCase from maastesting.factory import factory @@ -25,6 +24,7 @@ MAAS_NAMED_CONF_NAME, MAAS_RNDC_CONF_NAME, ) +from provisioningserver.dns.testing import patch_dns_config_path from testtools.matchers import ( AllMatch, FileContains, @@ -32,14 +32,11 @@ ) -conf = app_or_default().conf - - class TestSetUpDNSCommand(MAASServerTestCase): def test_set_up_dns_writes_configuration(self): dns_conf_dir = self.make_dir() - self.patch(conf, 'DNS_CONFIG_DIR', dns_conf_dir) + patch_dns_config_path(self, dns_conf_dir) call_command('set_up_dns') named_config = os.path.join(dns_conf_dir, MAAS_NAMED_CONF_NAME) rndc_conf_path = os.path.join(dns_conf_dir, MAAS_RNDC_CONF_NAME) @@ -47,8 +44,8 @@ def test_set_up_dns_does_not_overwrite_config(self): dns_conf_dir = self.make_dir() - self.patch(conf, 'DNS_CONFIG_DIR', dns_conf_dir) - random_content = factory.getRandomString() + patch_dns_config_path(self, dns_conf_dir) + random_content = factory.make_string() factory.make_file( location=dns_conf_dir, name=MAAS_NAMED_CONF_NAME, contents=random_content) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_commands_write_dns_config.py maas-1.7.6+bzr3376/src/maasserver/tests/test_commands_write_dns_config.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_commands_write_dns_config.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_commands_write_dns_config.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the write_dns_config command.""" @@ -17,7 +17,6 @@ import os -from celery.app import app_or_default from django.conf import settings from django.core.management import call_command from maasserver.enum import ( @@ -28,22 +27,20 @@ from maasserver.testing.testcase import MAASServerTestCase from netaddr import IPNetwork from provisioningserver import tasks +from provisioningserver.dns.testing import patch_dns_config_path from testtools.matchers import FileExists -conf = app_or_default().conf - - class TestWriteDNSConfigCommand(MAASServerTestCase): def test_write_dns_config_writes_zone_file(self): dns_conf_dir = self.make_dir() - self.patch(conf, 'DNS_CONFIG_DIR', dns_conf_dir) + patch_dns_config_path(self, dns_conf_dir) self.patch(settings, 'DNS_CONNECT', True) # Prevent rndc task dispatch. self.patch(tasks, "rndc_command") - domain = factory.getRandomString() - factory.make_node_group( + domain = factory.make_string() + factory.make_NodeGroup( name=domain, network=IPNetwork('192.168.0.1/24'), status=NODEGROUP_STATUS.ACCEPTED, diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_compose_preseed.py maas-1.7.6+bzr3376/src/maasserver/tests/test_compose_preseed.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_compose_preseed.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_compose_preseed.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `maasserver.compose_preseed`.""" @@ -15,11 +15,24 @@ __all__ = [] from maasserver.compose_preseed import compose_preseed -from maasserver.enum import NODE_STATUS +from maasserver.enum import ( + NODE_BOOT, + NODE_STATUS, + PRESEED_TYPE, + ) +from maasserver.rpc.testing.fixtures import RunningClusterRPCFixture from maasserver.testing.factory import factory +from maasserver.testing.osystems import make_usable_osystem from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils import absolute_reverse +from maastesting.matchers import MockCalledOnceWith from metadataserver.models import NodeKey +from provisioningserver.drivers.osystem import BOOT_IMAGE_PURPOSE +from provisioningserver.rpc.exceptions import ( + NoConnectionsAvailable, + NoSuchOperatingSystem, + ) +from provisioningserver.testing.os import make_osystem from testtools.matchers import ( KeysEqual, StartsWith, @@ -30,8 +43,9 @@ class TestComposePreseed(MAASServerTestCase): def test_compose_preseed_for_commissioning_node_produces_yaml(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) - preseed = yaml.safe_load(compose_preseed(node)) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) + preseed = yaml.safe_load( + compose_preseed(PRESEED_TYPE.COMMISSIONING, node)) self.assertIn('datasource', preseed) self.assertIn('MAAS', preseed['datasource']) self.assertThat( @@ -40,31 +54,39 @@ 'metadata_url', 'consumer_key', 'token_key', 'token_secret')) def test_compose_preseed_for_commissioning_node_has_header(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) - self.assertThat(compose_preseed(node), StartsWith("#cloud-config\n")) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) + preseed = compose_preseed(PRESEED_TYPE.COMMISSIONING, node) + self.assertThat(preseed, StartsWith("#cloud-config\n")) def test_compose_preseed_includes_metadata_url(self): - node = factory.make_node(status=NODE_STATUS.READY) - 
self.assertIn(absolute_reverse('metadata'), compose_preseed(node)) + node = factory.make_Node(status=NODE_STATUS.READY) + node.nodegroup.accept() + self.useFixture(RunningClusterRPCFixture()) + preseed = compose_preseed(PRESEED_TYPE.DEFAULT, node) + self.assertIn(absolute_reverse('metadata'), preseed) def test_compose_preseed_for_commissioning_includes_metadata_url(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) - preseed = yaml.safe_load(compose_preseed(node)) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) + preseed = yaml.safe_load( + compose_preseed(PRESEED_TYPE.COMMISSIONING, node)) self.assertEqual( absolute_reverse('metadata'), preseed['datasource']['MAAS']['metadata_url']) def test_compose_preseed_includes_node_oauth_token(self): - node = factory.make_node(status=NODE_STATUS.READY) - preseed = compose_preseed(node) + node = factory.make_Node(status=NODE_STATUS.READY) + node.nodegroup.accept() + self.useFixture(RunningClusterRPCFixture()) + preseed = compose_preseed(PRESEED_TYPE.DEFAULT, node) token = NodeKey.objects.get_token_for_node(node) self.assertIn('oauth_consumer_key=%s' % token.consumer.key, preseed) self.assertIn('oauth_token_key=%s' % token.key, preseed) self.assertIn('oauth_token_secret=%s' % token.secret, preseed) def test_compose_preseed_for_commissioning_includes_auth_token(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) - preseed = yaml.safe_load(compose_preseed(node)) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) + preseed = yaml.safe_load( + compose_preseed(PRESEED_TYPE.COMMISSIONING, node)) maas_dict = preseed['datasource']['MAAS'] token = NodeKey.objects.get_token_for_node(node) self.assertEqual(token.consumer.key, maas_dict['consumer_key']) @@ -72,8 +94,10 @@ self.assertEqual(token.secret, maas_dict['token_secret']) def test_compose_preseed_valid_local_cloud_config(self): - node = factory.make_node(status=NODE_STATUS.READY) - preseed = compose_preseed(node) + node = 
factory.make_Node(status=NODE_STATUS.READY) + node.nodegroup.accept() + self.useFixture(RunningClusterRPCFixture()) + preseed = compose_preseed(PRESEED_TYPE.DEFAULT, node) keyname = "cloud-init/local-cloud-config" self.assertIn(keyname, preseed) @@ -91,16 +115,18 @@ data = yaml.safe_load(value) self.assertIn("manage_etc_hosts", data) - self.assertEqual(data["manage_etc_hosts"], "localhost") + self.assertEqual(data["manage_etc_hosts"], False) self.assertIn("apt_preserve_sources_list", data) self.assertEqual(data["apt_preserve_sources_list"], True) def test_compose_preseed_with_curtin_installer(self): - node = factory.make_node(status=NODE_STATUS.READY) - node.use_fastpath_installer() - preseed = compose_preseed(node) + node = factory.make_Node( + status=NODE_STATUS.READY, boot_type=NODE_BOOT.FASTPATH) + node.nodegroup.accept() + self.useFixture(RunningClusterRPCFixture()) + preseed = yaml.safe_load( + compose_preseed(PRESEED_TYPE.CURTIN, node)) - preseed = yaml.safe_load(compose_preseed(node)) self.assertIn('datasource', preseed) self.assertIn('MAAS', preseed['datasource']) self.assertThat( @@ -110,3 +136,54 @@ self.assertEqual( absolute_reverse('curtin-metadata'), preseed['datasource']['MAAS']['metadata_url']) + + def test_compose_preseed_with_osystem_compose_preseed(self): + os_name = factory.make_name('os') + osystem = make_osystem(self, os_name, [BOOT_IMAGE_PURPOSE.XINSTALL]) + make_usable_osystem(self, os_name) + compose_preseed_orig = osystem.compose_preseed + compose_preseed_mock = self.patch(osystem, 'compose_preseed') + compose_preseed_mock.side_effect = compose_preseed_orig + + node = factory.make_Node( + osystem=os_name, status=NODE_STATUS.READY) + node.nodegroup.accept() + self.useFixture(RunningClusterRPCFixture()) + token = NodeKey.objects.get_token_for_node(node) + url = absolute_reverse('curtin-metadata') + compose_preseed(PRESEED_TYPE.CURTIN, node) + self.assertThat( + compose_preseed_mock, + MockCalledOnceWith( + PRESEED_TYPE.CURTIN, + 
(node.system_id, node.hostname), + (token.consumer.key, token.key, token.secret), + url)) + + def test_compose_preseed_propagates_NoSuchOperatingSystem(self): + # If the cluster controller replies that the node's OS is not known to + # it, compose_preseed() simply passes the exception up. + os_name = factory.make_name('os') + osystem = make_osystem(self, os_name, [BOOT_IMAGE_PURPOSE.XINSTALL]) + make_usable_osystem(self, os_name) + compose_preseed_mock = self.patch(osystem, 'compose_preseed') + compose_preseed_mock.side_effect = NoSuchOperatingSystem + node = factory.make_Node( + osystem=os_name, status=NODE_STATUS.READY) + node.nodegroup.accept() + self.useFixture(RunningClusterRPCFixture()) + self.assertRaises( + NoSuchOperatingSystem, + compose_preseed, PRESEED_TYPE.CURTIN, node) + + def test_compose_preseed_propagates_NoConnectionsAvailable(self): + # If the region does not have any connections to the node's cluster + # controller, compose_preseed() simply passes the exception up. + os_name = factory.make_name('os') + make_osystem(self, os_name, [BOOT_IMAGE_PURPOSE.XINSTALL]) + make_usable_osystem(self, os_name) + node = factory.make_Node( + osystem=os_name, status=NODE_STATUS.READY) + self.assertRaises( + NoConnectionsAvailable, + compose_preseed, PRESEED_TYPE.CURTIN, node) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_config_forms.py maas-1.7.6+bzr3376/src/maasserver/tests/test_config_forms.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_config_forms.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_config_forms.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test config forms utilities.""" @@ -70,8 +70,8 @@ label='Field c', required=False)), ]) - fielda_value = factory.getRandomString() - fieldc_value = factory.getRandomString() + fielda_value = factory.make_string() + fieldc_value = factory.make_string() data = QueryDict( 'multi_field_field_a=%s&multi_field_field_c=%s' % ( fielda_value, fieldc_value)) @@ -97,7 +97,7 @@ ]) # Create a value that will fail validation because it's too long. - fielda_value = factory.getRandomString(10) + fielda_value = factory.make_string(10) data = QueryDict('multi_field_field_b=%s' % fielda_value) form = FakeForm(data) @@ -111,8 +111,8 @@ def test_DictCharField_skip_check_true_skips_validation(self): # Create a value that will fail validation because it's too long. - field_name = factory.getRandomString(10) - field_value = factory.getRandomString(10) + field_name = factory.make_string(10) + field_value = factory.make_string(10) # multi_field_skip_check=true will make the form accept the value # even if it's not valid. data = QueryDict( @@ -132,9 +132,9 @@ def test_DictCharField_skip_check_false(self): # Create a value that will fail validation because it's too long. - field_value = factory.getRandomString(10) - field_name = factory.getRandomString() - field_label = factory.getRandomString() + field_value = factory.make_string(10) + field_name = factory.make_string() + field_label = factory.make_string() # Force the check with multi_field_skip_check=false. 
data = QueryDict( 'multi_field_%s=%s&multi_field_skip_check=false' % ( @@ -165,7 +165,7 @@ required=False) char_field = forms.CharField(label='Field a') - char_value = factory.getRandomString(10) + char_value = factory.make_string(10) data = QueryDict('char_field=%s' % (char_value)) form = FakeFormRequiredFalse(data) self.assertTrue(form.is_valid()) @@ -180,7 +180,7 @@ inputs = [ {'prefix_test': 'a', 'key': 'b', 'prefix_2': 'c'}, {}, - {'b': factory.getRandomString()}, + {'b': factory.make_string()}, ] prefix = 'prefix_' expected = [ @@ -196,9 +196,9 @@ class TestDictCharWidget(MAASServerTestCase): def test_DictCharWidget_id_for_label_uses_first_fields_name(self): - names = [factory.getRandomString()] + names = [factory.make_string()] initials = [] - labels = [factory.getRandomString()] + labels = [factory.make_string()] widget = DictCharWidget( [widgets.TextInput, widgets.TextInput], names, initials, labels) self.assertEqual( @@ -206,14 +206,14 @@ widget.id_for_label(' ')) def test_DictCharWidget_renders_fieldset_with_label_and_field_names(self): - names = [factory.getRandomString(), factory.getRandomString()] + names = [factory.make_string(), factory.make_string()] initials = [] - labels = [factory.getRandomString(), factory.getRandomString()] - values = [factory.getRandomString(), factory.getRandomString()] + labels = [factory.make_string(), factory.make_string()] + values = [factory.make_string(), factory.make_string()] widget = DictCharWidget( [widgets.TextInput, widgets.TextInput, widgets.CheckboxInput], names, initials, labels, skip_check=True) - name = factory.getRandomString() + name = factory.make_string() html_widget = fromstring( '' + widget.render(name, values) + '') widget_names = XPath('fieldset/input/@name')(html_widget) @@ -228,24 +228,24 @@ def test_empty_DictCharWidget_renders_as_empty_string(self): widget = DictCharWidget( [widgets.CheckboxInput], [], [], [], skip_check=True) - self.assertEqual('', widget.render(factory.getRandomString(), '')) + 
self.assertEqual('', widget.render(factory.make_string(), '')) def test_DictCharWidget_value_from_datadict_values_from_data(self): # 'value_from_datadict' extracts the values corresponding to the # field as a dictionary. - names = [factory.getRandomString(), factory.getRandomString()] + names = [factory.make_string(), factory.make_string()] initials = [] - labels = [factory.getRandomString(), factory.getRandomString()] - name = factory.getRandomString() - field_1_value = factory.getRandomString() - field_2_value = factory.getRandomString() + labels = [factory.make_string(), factory.make_string()] + name = factory.make_string() + field_1_value = factory.make_string() + field_2_value = factory.make_string() # Create a query string with the field2 before the field1 and another # (unknown) value. data = QueryDict( '%s_%s=%s&%s_%s=%s&%s=%s' % ( name, names[1], field_2_value, name, names[0], field_1_value, - factory.getRandomString(), factory.getRandomString()) + factory.make_string(), factory.make_string()) ) widget = DictCharWidget( [widgets.TextInput, widgets.TextInput], names, initials, labels) @@ -254,13 +254,13 @@ widget.value_from_datadict(data, None, name)) def test_DictCharWidget_renders_with_empty_string_as_input_data(self): - names = [factory.getRandomString(), factory.getRandomString()] + names = [factory.make_string(), factory.make_string()] initials = [] - labels = [factory.getRandomString(), factory.getRandomString()] + labels = [factory.make_string(), factory.make_string()] widget = DictCharWidget( [widgets.TextInput, widgets.TextInput, widgets.CheckboxInput], names, initials, labels, skip_check=True) - name = factory.getRandomString() + name = factory.make_string() html_widget = fromstring( '' + widget.render(name, '') + '') widget_names = XPath('fieldset/input/@name')(html_widget) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_dhcp.py maas-1.7.6+bzr3376/src/maasserver/tests/test_dhcp.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_dhcp.py 
2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_dhcp.py 2015-07-10 01:27:14.000000000 +0000 @@ -20,309 +20,588 @@ from maasserver import dhcp from maasserver.dhcp import ( configure_dhcp, - get_interfaces_managed_by, + do_configure_dhcp, + make_subnet_config, + split_ipv4_ipv6_interfaces, ) -from maasserver.dns import get_dns_server_address from maasserver.enum import ( NODEGROUP_STATUS, NODEGROUPINTERFACE_MANAGEMENT, ) from maasserver.models import Config -from maasserver.models.config import get_default_config +from maasserver.rpc.testing.fixtures import MockLiveRegionToClusterRPCFixture +from maasserver.testing.eventloop import ( + RegionEventLoopFixture, + RunningEventLoopFixture, + ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase -from maasserver.utils import map_enum -from maastesting.celery import CeleryFixture +from maastesting.matchers import ( + MockCalledOnceWith, + MockCallsMatch, + MockNotCalled, + ) +from mock import ( + ANY, + call, + sentinel, + ) from netaddr import ( IPAddress, IPNetwork, ) -from provisioningserver import tasks -from testresources import FixtureResource -from testtools.matchers import EndsWith - +from provisioningserver.rpc.cluster import ( + ConfigureDHCPv4, + ConfigureDHCPv6, + ) +from provisioningserver.rpc.testing import always_succeed_with +from provisioningserver.utils.url import compose_URL +from testtools.matchers import ( + AllMatch, + ContainsAll, + ContainsDict, + Equals, + IsInstance, + Not, + ) -class TestDHCP(MAASServerTestCase): - resources = ( - ('celery', FixtureResource(CeleryFixture())), - ) +class TestSplitIPv4IPv6Interfaces(MAASServerTestCase): + """Tests for `split_ipv4_ipv6_interfaces`.""" - def test_get_interfaces_managed_by_returns_managed_interfaces(self): - self.patch(settings, "DHCP_CONNECT", False) - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) - self.patch(settings, "DHCP_CONNECT", 
True) - managed_interfaces = get_interfaces_managed_by(nodegroup) - self.assertNotEqual([], managed_interfaces) - self.assertEqual(1, len(managed_interfaces)) - self.assertEqual( - list(nodegroup.nodegroupinterface_set.all()), - managed_interfaces) - - def test_get_interfaces_managed_by_returns_None_if_not_accepted(self): - unaccepted_statuses = set(map_enum(NODEGROUP_STATUS).values()) - unaccepted_statuses.remove(NODEGROUP_STATUS.ACCEPTED) - managed_interfaces = { - status: get_interfaces_managed_by( - factory.make_node_group(status=status)) - for status in unaccepted_statuses - } - self.assertEqual( - {status: None for status in unaccepted_statuses}, - managed_interfaces) + def make_ipv4_interface(self, nodegroup): + return factory.make_NodeGroupInterface( + nodegroup, network=factory.make_ipv4_network()) + + def make_ipv6_interface(self, nodegroup): + return factory.make_NodeGroupInterface( + nodegroup, network=factory.make_ipv6_network()) + + def test__separates_IPv4_from_IPv6_interfaces(self): + nodegroup = factory.make_NodeGroup() + # Create 0-2 IPv4 cluster interfaces and 0-2 IPv6 cluster interfaces. 
+ ipv4_interfaces = [ + self.make_ipv4_interface(nodegroup) + for _ in range(random.randint(0, 2)) + ] + ipv6_interfaces = [ + self.make_ipv6_interface(nodegroup) + for _ in range(random.randint(0, 2)) + ] + interfaces = sorted( + ipv4_interfaces + ipv6_interfaces, + key=lambda *args: random.randint(0, 10)) + + ipv4_result, ipv6_result = split_ipv4_ipv6_interfaces(interfaces) + + self.assertItemsEqual(ipv4_interfaces, ipv4_result) + self.assertItemsEqual(ipv6_interfaces, ipv6_result) + + +class TestMakeSubnetConfig(MAASServerTestCase): + """Tests for `make_subnet_config`.""" + + def test__includes_all_parameters(self): + interface = factory.make_NodeGroupInterface( + factory.make_NodeGroup()) + config = make_subnet_config( + interface, factory.make_name('dns'), factory.make_name('ntp')) + self.assertIsInstance(config, dict) + self.assertThat( + config.keys(), + ContainsAll([ + 'subnet', + 'subnet_mask', + 'subnet_cidr', + 'broadcast_ip', + 'interface', + 'router_ip', + 'dns_servers', + 'ntp_server', + 'domain_name', + 'ip_range_low', + 'ip_range_high', + ])) - def test_configure_dhcp_stops_server_if_no_managed_interface(self): - self.patch(settings, "DHCP_CONNECT", True) - self.patch(dhcp, 'stop_dhcp_server') - nodegroup = factory.make_node_group( - status=NODEGROUP_STATUS.ACCEPTED, - management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED, + def test__sets_dns_and_ntp_from_arguments(self): + interface = factory.make_NodeGroupInterface( + factory.make_NodeGroup()) + dns = '%s %s' % ( + factory.make_ipv4_address(), + factory.make_ipv6_address(), ) - configure_dhcp(nodegroup) - self.assertEqual(1, dhcp.stop_dhcp_server.apply_async.call_count) + ntp = factory.make_name('ntp') + config = make_subnet_config(interface, dns_servers=dns, ntp_server=ntp) + self.expectThat(config['dns_servers'], Equals(dns)) + self.expectThat(config['ntp_server'], Equals(ntp)) + + def test__sets_domain_name_from_cluster(self): + nodegroup = factory.make_NodeGroup() + interface = 
factory.make_NodeGroupInterface(nodegroup) + config = make_subnet_config( + interface, factory.make_name('dns'), factory.make_name('ntp')) + self.expectThat(config['domain_name'], Equals(nodegroup.name)) + + def test__sets_other_items_from_interface(self): + interface = factory.make_NodeGroupInterface( + factory.make_NodeGroup()) + config = make_subnet_config( + interface, factory.make_name('dns'), factory.make_name('ntp')) + self.expectThat(config['broadcast_ip'], Equals(interface.broadcast_ip)) + self.expectThat(config['interface'], Equals(interface.interface)) + self.expectThat(config['router_ip'], Equals(interface.router_ip)) + + def test__passes_IP_addresses_as_strings(self): + interface = factory.make_NodeGroupInterface( + factory.make_NodeGroup()) + config = make_subnet_config( + interface, factory.make_name('dns'), factory.make_name('ntp')) + self.expectThat(config['subnet'], IsInstance(unicode)) + self.expectThat(config['subnet_mask'], IsInstance(unicode)) + self.expectThat(config['subnet_cidr'], IsInstance(unicode)) + self.expectThat(config['broadcast_ip'], IsInstance(unicode)) + self.expectThat(config['router_ip'], IsInstance(unicode)) + self.expectThat(config['ip_range_low'], IsInstance(unicode)) + self.expectThat(config['ip_range_high'], IsInstance(unicode)) + + def test__defines_IPv4_subnet(self): + interface = factory.make_NodeGroupInterface( + factory.make_NodeGroup(), network=IPNetwork('10.9.8.7/24')) + config = make_subnet_config( + interface, factory.make_name('dns'), factory.make_name('ntp')) + self.expectThat(config['subnet'], Equals('10.9.8.0')) + self.expectThat(config['subnet_mask'], Equals('255.255.255.0')) + self.expectThat(config['subnet_cidr'], Equals('10.9.8.0/24')) + + def test__defines_IPv6_subnet(self): + interface = factory.make_NodeGroupInterface( + factory.make_NodeGroup(), + network=IPNetwork('fd38:c341:27da:c831::/64')) + config = make_subnet_config( + interface, factory.make_name('dns'), factory.make_name('ntp')) + # Don't 
expect a specific literal value, like we do for IPv4; there + # are different spellings. + self.expectThat( + IPAddress(config['subnet']), + Equals(IPAddress('fd38:c341:27da:c831::'))) + # (Netmask is not used for the IPv6 config, so ignore it.) + self.expectThat( + IPNetwork(config['subnet_cidr']), + Equals(IPNetwork('fd38:c341:27da:c831::/64'))) + + def test__passes_dynamic_range(self): + interface = factory.make_NodeGroupInterface( + factory.make_NodeGroup()) + config = make_subnet_config( + interface, factory.make_name('dns'), factory.make_name('ntp')) + self.expectThat( + (config['ip_range_low'], config['ip_range_high']), + Equals((interface.ip_range_low, interface.ip_range_high))) + self.expectThat( + config['ip_range_low'], Not(Equals(interface.static_ip_range_low))) + + def test__doesnt_convert_None_router_ip(self): + interface = factory.make_NodeGroupInterface(factory.make_NodeGroup()) + interface.router_ip = None + interface.save() + config = make_subnet_config( + interface, factory.make_name('dns'), factory.make_name('ntp')) + self.assertEqual('', config['router_ip']) + + +class TestDoConfigureDHCP(MAASServerTestCase): + """Tests for `do_configure_dhcp`.""" + + scenarios = ( + ("DHCPv4", { + "command": ConfigureDHCPv4, + "make_network": factory.make_ipv4_network, + "make_address": factory.make_ipv4_address, + "ip_version": 4, + }), + ("DHCPv6", { + "command": ConfigureDHCPv6, + "make_network": factory.make_ipv6_network, + "make_address": factory.make_ipv6_address, + "ip_version": 6, + }), + ) - def test_configure_dhcp_obeys_DHCP_CONNECT(self): - self.patch(settings, "DHCP_CONNECT", False) - self.patch(dhcp, 'write_dhcp_config') - factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) - self.assertEqual(0, dhcp.write_dhcp_config.apply_async.call_count) - - def test_configure_dhcp_writes_dhcp_config(self): - mocked_task = self.patch(dhcp, 'write_dhcp_config') - self.patch( - settings, 'DEFAULT_MAAS_URL', - 'http://%s/' % factory.getRandomIPAddress()) - 
self.patch(settings, "DHCP_CONNECT", True) - nodegroup = factory.make_node_group( + def prepare_rpc(self, nodegroup): + """Set up test case for speaking RPC to `nodegroup`. + + :param nodegroup: A cluster. It will "run" a mock RPC service. + :return: Protocol, Command stub + """ + self.useFixture(RegionEventLoopFixture('rpc')) + self.useFixture(RunningEventLoopFixture()) + fixture = self.useFixture(MockLiveRegionToClusterRPCFixture()) + cluster = fixture.makeCluster(nodegroup, self.command) + return cluster, getattr(cluster, self.command.commandName) + + def test__configures_dhcp(self): + dns_server = self.make_address() + maas_url = compose_URL("http://", dns_server) + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, + dhcp_key=factory.make_name('key'), + network=self.make_network(), + maas_url=maas_url) + ntp_server = factory.make_name('ntp') + + protocol, command_stub = self.prepare_rpc(nodegroup) + command_stub.side_effect = always_succeed_with({}) + + # Although the above nodegroup has managed interfaces, we pass the + # empty list here; do_configure_dhcp() dutifully believes us. + do_configure_dhcp(self.ip_version, nodegroup, [], ntp_server) + + self.assertThat( + command_stub, MockCalledOnceWith( + ANY, omapi_key=nodegroup.dhcp_key, subnet_configs=[])) + + def test__configures_dhcp_with_subnets(self): + dns_server = self.make_address() + maas_url = compose_URL("http://", dns_server) + nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ACCEPTED, - dhcp_key=factory.getRandomString(), + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, + dhcp_key=factory.make_string(), interface=factory.make_name('eth'), - network=IPNetwork("192.168.102.0/22")) + network=self.make_network(), + maas_url=maas_url) # Create a second DHCP-managed interface. 
- factory.make_node_group_interface( - nodegroup=nodegroup, - interface=factory.make_name('eth'), - network=IPNetwork("10.1.1/24"), + factory.make_NodeGroupInterface( + nodegroup=nodegroup, interface=factory.make_name('eth'), management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, + network=self.make_network()) + ntp_server = factory.make_name('ntp') + interfaces = nodegroup.get_managed_interfaces() + + protocol, command_stub = self.prepare_rpc(nodegroup) + command_stub.side_effect = always_succeed_with({}) + + do_configure_dhcp(self.ip_version, nodegroup, interfaces, ntp_server) + + expected_subnet_configs = [ + make_subnet_config(interface, dns_server, ntp_server) + for interface in nodegroup.get_managed_interfaces() + ] + + self.assertThat( + command_stub, MockCalledOnceWith( + ANY, subnet_configs=expected_subnet_configs, + omapi_key=nodegroup.dhcp_key, + )) + + +class TestDoConfigureDHCPWrappers(MAASServerTestCase): + """Tests for `do_configure_dhcp` wrapper functions.""" + + def test_configure_dhcpv4_calls_do_configure_dhcp(self): + do_configure_dhcp = self.patch_autospec(dhcp, "do_configure_dhcp") + dhcp.configure_dhcpv4( + sentinel.nodegroup, sentinel.interfaces, sentinel.ntp_server) + self.assertThat(do_configure_dhcp, MockCalledOnceWith( + 4, sentinel.nodegroup, sentinel.interfaces, sentinel.ntp_server)) + + def test_configure_dhcpv6_calls_do_configure_dhcp(self): + do_configure_dhcp = self.patch_autospec(dhcp, "do_configure_dhcp") + dhcp.configure_dhcpv6( + sentinel.nodegroup, sentinel.interfaces, sentinel.ntp_server) + self.assertThat(do_configure_dhcp, MockCalledOnceWith( + 6, sentinel.nodegroup, sentinel.interfaces, sentinel.ntp_server)) + + +class TestConfigureDHCP(MAASServerTestCase): + """Tests for `configure_dhcp`.""" + + def patch_configure_funcs(self): + """Patch `configure_dhcpv4` and `configure_dhcpv6`.""" + return ( + self.patch(dhcp, 'configure_dhcpv4'), + self.patch(dhcp, 'configure_dhcpv6'), ) - configure_dhcp(nodegroup) - dhcp_subnets = [] - for 
interface in nodegroup.get_managed_interfaces(): - dhcp_params = [ - 'interface', - 'subnet_mask', - 'broadcast_ip', - 'router_ip', - 'ip_range_low', - 'ip_range_high', - ] + def make_cluster(self, status=NODEGROUP_STATUS.ACCEPTED, omapi_key=None, + **kwargs): + """Create a `NodeGroup` without interfaces. + + Status defaults to `ACCEPTED`. + """ + if omapi_key is None: + # Set an arbitrary OMAPI key, so that the cluster won't need to + # shell out to create one. + omapi_key = factory.make_name('key') + return factory.make_NodeGroup( + status=status, dhcp_key=omapi_key, **kwargs) + + def make_cluster_interface(self, network, cluster=None, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, + **kwargs): + if cluster is None: + cluster = self.make_cluster() + return factory.make_NodeGroupInterface( + cluster, network=network, management=management, **kwargs) + + def make_ipv4_interface(self, cluster=None, **kwargs): + """Create an IPv4 `NodeGroupInterface` for `cluster`. + + The interface defaults to being managed. + """ + return self.make_cluster_interface( + factory.make_ipv4_network(), cluster, **kwargs) + + def make_ipv6_interface(self, cluster=None, **kwargs): + """Create an IPv6 `NodeGroupInterface` for `cluster`. + + The interface defaults to being managed. 
+ """ + return self.make_cluster_interface( + factory.make_ipv6_network(), cluster, **kwargs) + + def test__obeys_DHCP_CONNECT(self): + configure_dhcpv4, configure_dhcpv6 = self.patch_configure_funcs() + cluster = self.make_cluster() + self.make_ipv4_interface(cluster) + self.make_ipv6_interface(cluster) + self.patch(settings, "DHCP_CONNECT", False) - dhcp_subnet = { - param: getattr(interface, param) - for param in dhcp_params} - dhcp_subnet["dns_servers"] = get_dns_server_address() - dhcp_subnet["ntp_server"] = get_default_config()['ntp_server'] - dhcp_subnet["domain_name"] = nodegroup.name - dhcp_subnet["subnet"] = unicode( - IPAddress(interface.ip_range_low) & - IPAddress(interface.subnet_mask)) - dhcp_subnets.append(dhcp_subnet) - - expected_params = {} - expected_params["omapi_key"] = nodegroup.dhcp_key - expected_params["dhcp_interfaces"] = ' '.join([ - interface.interface - for interface in nodegroup.get_managed_interfaces()]) - expected_params["dhcp_subnets"] = dhcp_subnets - - args, kwargs = mocked_task.apply_async.call_args - result_params = kwargs['kwargs'] - # The check that the callback is correct is done in - # test_configure_dhcp_restart_dhcp_server. 
- del result_params['callback'] - - self.assertEqual(expected_params, result_params) - - def test_dhcp_config_uses_dns_server_from_cluster_controller(self): - mocked_task = self.patch(dhcp, 'write_dhcp_config') - ip = factory.getRandomIPAddress() - maas_url = 'http://%s/' % ip - nodegroup = factory.make_node_group( - maas_url=maas_url, - status=NODEGROUP_STATUS.ACCEPTED, - dhcp_key=factory.getRandomString(), - interface=factory.make_name('eth'), - network=IPNetwork("192.168.102.0/22")) + configure_dhcp(cluster) + + self.expectThat(configure_dhcpv4, MockNotCalled()) + self.expectThat(configure_dhcpv6, MockNotCalled()) + + def test__does_not_configure_interfaces_if_nodegroup_not_accepted(self): + configure_dhcpv4, configure_dhcpv6 = self.patch_configure_funcs() + cluster = self.make_cluster(status=NODEGROUP_STATUS.PENDING) + self.make_ipv4_interface(cluster) + self.make_ipv6_interface(cluster) self.patch(settings, "DHCP_CONNECT", True) - configure_dhcp(nodegroup) - kwargs = mocked_task.apply_async.call_args[1]['kwargs'] - self.assertEqual(ip, kwargs['dhcp_subnets'][0]['dns_servers']) + configure_dhcp(cluster) - def test_configure_dhcp_restarts_dhcp_server(self): - self.patch(tasks, "sudo_write_file") - mocked_check_call = self.patch(tasks, "call_and_check") - self.patch(settings, "DHCP_CONNECT", True) - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) - configure_dhcp(nodegroup) - self.assertEqual( - mocked_check_call.call_args[0][0], - ['sudo', '-n', 'service', 'maas-dhcp-server', 'restart']) - - def test_configure_dhcp_is_called_with_valid_dhcp_key(self): - self.patch(dhcp, 'write_dhcp_config') - self.patch(settings, "DHCP_CONNECT", True) - nodegroup = factory.make_node_group( - status=NODEGROUP_STATUS.ACCEPTED, dhcp_key='') - configure_dhcp(nodegroup) - args, kwargs = dhcp.write_dhcp_config.apply_async.call_args - self.assertThat(kwargs['kwargs']['omapi_key'], EndsWith('==')) + self.expectThat(configure_dhcpv4, MockCalledOnceWith(cluster, [], 
ANY)) + self.expectThat(configure_dhcpv6, MockCalledOnceWith(cluster, [], ANY)) - def test_dhcp_config_gets_written_when_nodegroup_becomes_active(self): - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.PENDING) + def test__configures_dhcpv4(self): + getClientFor = self.patch_autospec(dhcp, "getClientFor") + ip = factory.make_ipv4_address() + cluster = self.make_cluster(maas_url='http://%s/' % ip) + self.make_ipv4_interface(cluster) self.patch(settings, "DHCP_CONNECT", True) - self.patch(dhcp, 'write_dhcp_config') - nodegroup.accept() - self.assertEqual(1, dhcp.write_dhcp_config.apply_async.call_count) - def test_dhcp_config_gets_written_when_nodegroup_name_changes(self): - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) + configure_dhcp(cluster) + + self.assertThat(getClientFor, MockCallsMatch( + call(cluster.uuid), call(cluster.uuid))) + client = getClientFor.return_value + self.assertThat(client, MockCallsMatch( + call(ANY, omapi_key=ANY, subnet_configs=ANY), + call(ANY, omapi_key=ANY, subnet_configs=ANY), + )) + + subnet_configs = [ + subnet_config + for call_args in client.call_args_list + for subnet_config in call_args[1]['subnet_configs'] + ] + self.assertThat( + subnet_configs, AllMatch( + ContainsDict({"dns_servers": Equals(ip)}))) + + def test__passes_only_IPv4_interfaces_to_DHCPv4(self): + configure_dhcpv4, _ = self.patch_configure_funcs() + cluster = self.make_cluster() + ipv4_interface = self.make_ipv4_interface(cluster) + self.make_ipv6_interface(cluster) self.patch(settings, "DHCP_CONNECT", True) - self.patch(dhcp, 'write_dhcp_config') - new_name = factory.make_name('dns name'), - nodegroup.name = new_name - nodegroup.save() + configure_dhcp(cluster) - self.assertEqual(1, dhcp.write_dhcp_config.apply_async.call_count) + self.assertThat( + configure_dhcpv4, + MockCalledOnceWith(cluster, [ipv4_interface], ANY)) - def test_write_dhcp_config_task_routed_to_nodegroup_worker(self): - nodegroup = 
factory.make_node_group(status=NODEGROUP_STATUS.PENDING) + def test__passes_only_IPv6_interfaces_to_DHCPv6(self): + configure_dhcpv4, configure_dhcpv6 = self.patch_configure_funcs() + cluster = self.make_cluster() + ipv6_interface = self.make_ipv6_interface(cluster) + self.make_ipv4_interface(cluster) self.patch(settings, "DHCP_CONNECT", True) - self.patch(dhcp, 'write_dhcp_config') - nodegroup.accept() - args, kwargs = dhcp.write_dhcp_config.apply_async.call_args - self.assertEqual(nodegroup.work_queue, kwargs['queue']) - def test_write_dhcp_config_restart_task_routed_to_nodegroup_worker(self): - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.PENDING) + configure_dhcp(cluster) + + self.assertThat( + configure_dhcpv6, + MockCalledOnceWith(cluster, [ipv6_interface], ANY)) + + def test__uses_ntp_server_from_config(self): + configure_dhcpv4, configure_dhcpv6 = self.patch_configure_funcs() + cluster = self.make_cluster() + self.make_ipv4_interface(cluster) + self.patch(settings, "DHCP_CONNECT", True) + + configure_dhcp(cluster) + + ntp_server = Config.objects.get_config('ntp_server') + self.assertThat( + configure_dhcpv4, + MockCalledOnceWith(ANY, ANY, ntp_server)) + self.assertThat( + configure_dhcpv6, + MockCalledOnceWith(ANY, ANY, ntp_server)) + + +class TestDHCPConnect(MAASServerTestCase): + """Tests for DHCP signals triggered when saving a cluster interface.""" + + def setUp(self): + super(TestDHCPConnect, self).setUp() + self.patch_autospec(dhcp, "configure_dhcp") + + def test_dhcp_config_gets_written_when_nodegroup_becomes_active(self): + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.PENDING, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) self.patch(settings, "DHCP_CONNECT", True) - self.patch(tasks, 'sudo_write_file') - task = self.patch(dhcp, 'restart_dhcp_server') + nodegroup.accept() - args, kwargs = task.subtask.call_args - self.assertEqual(nodegroup.work_queue, kwargs['options']['queue']) + + 
self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup)) + + def test_dhcp_config_gets_written_when_nodegroup_name_changes(self): + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + self.patch(settings, "DHCP_CONNECT", True) + + nodegroup.name = factory.make_name('domain') + nodegroup.save() + + self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup)) def test_dhcp_config_gets_written_when_interface_IP_changes(self): - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) [interface] = nodegroup.nodegroupinterface_set.all() self.patch(settings, "DHCP_CONNECT", True) - self.patch(dhcp, 'write_dhcp_config') - interface.ip = factory.getRandomIPInNetwork( + interface.ip = factory.pick_ip_in_network( interface.network, but_not=[interface.ip]) interface.save() - self.assertEqual(1, dhcp.write_dhcp_config.apply_async.call_count) + self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup)) def test_dhcp_config_gets_written_when_interface_management_changes(self): - nodegroup = factory.make_node_group( + nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ACCEPTED, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) [interface] = nodegroup.nodegroupinterface_set.all() self.patch(settings, "DHCP_CONNECT", True) - self.patch(dhcp, 'write_dhcp_config') interface.management = NODEGROUPINTERFACE_MANAGEMENT.DHCP interface.save() - self.assertEqual(1, dhcp.write_dhcp_config.apply_async.call_count) + self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup)) def test_dhcp_config_gets_written_when_interface_name_changes(self): - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) 
[interface] = nodegroup.get_managed_interfaces() self.patch(settings, "DHCP_CONNECT", True) - self.patch(dhcp, 'write_dhcp_config') interface.interface = factory.make_name('itf') interface.save() - self.assertEqual(1, dhcp.write_dhcp_config.apply_async.call_count) + self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup)) def test_dhcp_config_gets_written_when_netmask_changes(self): - network = factory.getRandomNetwork(slash='255.255.255.0') - nodegroup = factory.make_node_group( - status=NODEGROUP_STATUS.ACCEPTED, network=network) + network = factory.make_ipv4_network(slash='255.255.255.0') + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, network=network, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) [interface] = nodegroup.get_managed_interfaces() self.patch(settings, "DHCP_CONNECT", True) - self.patch(dhcp, 'write_dhcp_config') interface.subnet_mask = '255.255.0.0' interface.save() - self.assertEqual(1, dhcp.write_dhcp_config.apply_async.call_count) + self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup)) def test_dhcp_config_gets_written_when_interface_router_ip_changes(self): - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) [interface] = nodegroup.get_managed_interfaces() self.patch(settings, "DHCP_CONNECT", True) - self.patch(dhcp, 'write_dhcp_config') - new_router_ip = factory.getRandomIPInNetwork( - interface.network, but_not=[interface.router_ip]) - interface.router_ip = new_router_ip + interface.router_ip = factory.pick_ip_in_network( + interface.network, but_not=[interface.router_ip]) interface.save() - self.assertEqual(1, dhcp.write_dhcp_config.apply_async.call_count) + self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup)) def test_dhcp_config_gets_written_when_ip_range_changes(self): - nodegroup = 
factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) [interface] = nodegroup.get_managed_interfaces() self.patch(settings, "DHCP_CONNECT", True) - self.patch(dhcp, 'write_dhcp_config') - low, high = factory.make_ip_range( - interface.network, - but_not=(interface.ip_range_low, interface.ip_range_high)) - interface.ip_range_low = unicode(low) - interface.ip_range_high = unicode(high) + interface.ip_range_low = unicode( + IPAddress(interface.ip_range_low) + 1) + interface.ip_range_high = unicode( + IPAddress(interface.ip_range_high) - 1) interface.save() - self.assertEqual(1, dhcp.write_dhcp_config.apply_async.call_count) + self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup)) def test_dhcp_config_is_not_written_when_foreign_dhcp_changes(self): - nodegroup = factory.make_node_group( + nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ACCEPTED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) [interface] = nodegroup.get_managed_interfaces() - self.patch(dhcp, 'write_dhcp_config') self.patch(settings, "DHCP_CONNECT", True) - interface.foreign_dhcp = factory.getRandomIPInNetwork( - interface.network) + interface.foreign_dhcp = factory.pick_ip_in_network(interface.network) interface.save() - self.assertEqual([], dhcp.write_dhcp_config.apply_async.mock_calls) + self.assertThat(dhcp.configure_dhcp, MockNotCalled()) def test_dhcp_config_gets_written_when_ntp_server_changes(self): # When the "ntp_server" Config item is changed, check that all # nodegroups get their DHCP config re-written. 
num_active_nodegroups = random.randint(1, 10) num_inactive_nodegroups = random.randint(1, 10) - for x in range(num_active_nodegroups): - factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) - for x in range(num_inactive_nodegroups): - factory.make_node_group(status=NODEGROUP_STATUS.PENDING) - # Silence stop_dhcp_server: it will be called for the inactive - # nodegroups. - self.patch(dhcp, 'stop_dhcp_server') - - self.patch(settings, "DHCP_CONNECT", True) - self.patch(dhcp, 'write_dhcp_config') - - Config.objects.set_config("ntp_server", factory.getRandomIPAddress()) - - self.assertEqual( - num_active_nodegroups, - dhcp.write_dhcp_config.apply_async.call_count) + for _ in range(num_active_nodegroups): + factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + for _ in range(num_inactive_nodegroups): + factory.make_NodeGroup( + status=NODEGROUP_STATUS.PENDING, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + self.patch(settings, "DHCP_CONNECT", True) + + Config.objects.set_config("ntp_server", factory.make_ipv4_address()) + + # Every nodegroup is updated, including those that are PENDING. 
+ expected_call_one_nodegroup = [call(ANY)] + expected_calls = expected_call_one_nodegroup * ( + num_active_nodegroups + num_inactive_nodegroups) + self.assertThat(dhcp.configure_dhcp, MockCallsMatch(*expected_calls)) + + def test_dhcp_config_gets_written_when_managed_interface_is_deleted(self): + interface = factory.make_NodeGroupInterface( + factory.make_NodeGroup(status=NODEGROUP_STATUS.ACCEPTED), + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + self.patch(settings, "DHCP_CONNECT", True) + + interface.delete() + + self.assertThat( + dhcp.configure_dhcp, MockCalledOnceWith(interface.nodegroup)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_dns.py maas-1.7.6+bzr3376/src/maasserver/tests/test_dns.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_dns.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_dns.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,623 +0,0 @@ -# Copyright 2012-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Test DNS module.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - - -from itertools import islice -import socket - -from celery.task import task -from django.conf import settings -from django.core.management import call_command -from maasserver import ( - dns, - server_address, - ) -from maasserver.enum import ( - NODEGROUP_STATUS, - NODEGROUPINTERFACE_MANAGEMENT, - ) -from maasserver.models import ( - Config, - node as node_module, - ) -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase -from maasserver.utils import map_enum -from maastesting.celery import CeleryFixture -from maastesting.fakemethod import FakeMethod -from maastesting.matchers import MockCalledOnceWith -from mock import ( - ANY, - call, - Mock, - ) -from netaddr import ( - IPNetwork, - IPRange, - ) -from provisioningserver import tasks -from provisioningserver.dns.config import ( - conf, - DNSForwardZoneConfig, - DNSReverseZoneConfig, - DNSZoneConfigBase, - ) -from provisioningserver.dns.utils import generated_hostname -from provisioningserver.testing.bindfixture import BINDServer -from provisioningserver.testing.tests.test_bindfixture import dig_call -from rabbitfixture.server import allocate_ports -from testresources import FixtureResource -from testtools import TestCase -from testtools.matchers import ( - IsInstance, - MatchesAll, - MatchesListwise, - MatchesStructure, - ) - - -class TestDNSUtilities(MAASServerTestCase): - - def test_zone_serial_parameters(self): - self.assertThat( - dns.zone_serial, - MatchesStructure.byEquality( - maxvalue=2 ** 32 - 1, - minvalue=1, - incr=1, - ) - ) - - def test_next_zone_serial_returns_sequence(self): - initial = int(dns.next_zone_serial()) - self.assertSequenceEqual( - ['%0.10d' % i for i in range(initial + 1, initial + 11)], - [dns.next_zone_serial() for i in range(initial, initial + 10)]) - - def 
patch_DEFAULT_MAAS_URL_with_random_values(self, hostname=None): - if hostname is None: - hostname = factory.getRandomString() - url = 'http://%s:%d/%s' % ( - hostname, factory.getRandomPort(), factory.getRandomString()) - self.patch(settings, 'DEFAULT_MAAS_URL', url) - - def test_get_dns_server_address_resolves_hostname(self): - ip = factory.getRandomIPAddress() - resolver = FakeMethod(result=ip) - self.patch(server_address, 'gethostbyname', resolver) - hostname = factory.make_hostname() - self.patch_DEFAULT_MAAS_URL_with_random_values(hostname=hostname) - self.assertEqual( - (ip, [(hostname, )]), - (dns.get_dns_server_address(), resolver.extract_args())) - - def test_get_dns_server_address_raises_if_hostname_doesnt_resolve(self): - self.patch( - dns, 'get_maas_facing_server_address', - FakeMethod(failure=socket.error)) - self.patch_DEFAULT_MAAS_URL_with_random_values() - self.assertRaises(dns.DNSException, dns.get_dns_server_address) - - def test_get_dns_server_address_logs_warning_if_ip_is_localhost(self): - logger = self.patch(dns, 'logger') - self.patch( - dns, 'get_maas_facing_server_address', - Mock(return_value='127.0.0.1')) - dns.get_dns_server_address() - self.assertEqual( - call(dns.WARNING_MESSAGE % '127.0.0.1'), - logger.warn.call_args) - - def test_get_dns_server_address_uses_nodegroup_maas_url(self): - ip = factory.getRandomIPAddress() - resolver = FakeMethod(result=ip) - self.patch(server_address, 'gethostbyname', resolver) - hostname = factory.make_hostname() - maas_url = 'http://%s' % hostname - nodegroup = factory.make_node_group(maas_url=maas_url) - self.assertEqual( - (ip, [(hostname, )]), - (dns.get_dns_server_address(nodegroup), resolver.extract_args())) - - -class TestLazyDict(TestCase): - """Tests for `lazydict`.""" - - def test_empty_initially(self): - self.assertEqual({}, dns.lazydict(Mock())) - - def test_populates_on_demand(self): - value = factory.make_name('value') - value_dict = dns.lazydict(lambda key: value) - key = 
factory.make_name('key') - retrieved_value = value_dict[key] - self.assertEqual(value, retrieved_value) - self.assertEqual({key: value}, value_dict) - - def test_remembers_elements(self): - value_dict = dns.lazydict(lambda key: factory.make_name('value')) - key = factory.make_name('key') - self.assertEqual(value_dict[key], value_dict[key]) - - def test_holds_one_value_per_key(self): - value_dict = dns.lazydict(lambda key: key) - key1 = factory.make_name('key') - key2 = factory.make_name('key') - - value1 = value_dict[key1] - value2 = value_dict[key2] - - self.assertEqual((key1, key2), (value1, value2)) - self.assertEqual({key1: key1, key2: key2}, value_dict) - - -class TestDNSConfigModifications(MAASServerTestCase): - - resources = ( - ("celery", FixtureResource(CeleryFixture())), - ) - - def setUp(self): - super(TestDNSConfigModifications, self).setUp() - self.bind = self.useFixture(BINDServer()) - self.patch(conf, 'DNS_CONFIG_DIR', self.bind.config.homedir) - - # Use a random port for rndc. - self.patch(conf, 'DNS_RNDC_PORT', allocate_ports("localhost")[0]) - # This simulates what should happen when the package is - # installed: - # Create MAAS-specific DNS configuration files. - call_command('set_up_dns') - # Register MAAS-specific DNS configuration files with the - # system's BIND instance. - call_command( - 'get_named_conf', edit=True, - config_path=self.bind.config.conf_file) - # Reload BIND. 
- self.bind.runner.rndc('reload') - - def create_managed_nodegroup(self): - return factory.make_node_group( - network=IPNetwork('192.168.0.1/24'), - status=NODEGROUP_STATUS.ACCEPTED, - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) - - def create_nodegroup_with_lease(self, lease_number=1, nodegroup=None): - if nodegroup is None: - nodegroup = self.create_managed_nodegroup() - [interface] = nodegroup.get_managed_interfaces() - node = factory.make_node( - nodegroup=nodegroup) - mac = factory.make_mac_address(node=node) - ips = IPRange(interface.ip_range_low, interface.ip_range_high) - lease_ip = unicode(islice(ips, lease_number, lease_number + 1).next()) - lease = factory.make_dhcp_lease( - nodegroup=nodegroup, mac=mac.mac_address, ip=lease_ip) - # Simulate that this lease was created by - # DHCPLease.objects.update_leases: update its DNS config. - dns.change_dns_zones([nodegroup]) - return nodegroup, node, lease - - def dig_resolve(self, fqdn): - """Resolve `fqdn` using dig. Returns a list of results.""" - return dig_call( - port=self.bind.config.port, - commands=[fqdn, '+short']).split('\n') - - def dig_reverse_resolve(self, ip): - """Reverse resolve `ip` using dig. Returns a list of results.""" - return dig_call( - port=self.bind.config.port, - commands=['-x', ip, '+short']).split('\n') - - def assertDNSMatches(self, hostname, domain, ip): - fqdn = "%s.%s" % (hostname, domain) - autogenerated_hostname = '%s.' % generated_hostname(ip, domain) - forward_lookup_result = self.dig_resolve(fqdn) - if '%s.' % fqdn == autogenerated_hostname: - # If the fqdn is an autogenerated hostname, it resolves to the IP - # address (A record). - expected_results = [ip] - else: - # If the fqdn is a custom hostname, it resolves to the - # autogenerated hostname (CNAME record) and the IP address - # (A record). - expected_results = [autogenerated_hostname, ip] - self.assertEqual( - expected_results, forward_lookup_result, - "Failed to resolve '%s' (results: '%s')." 
% ( - fqdn, ','.join(forward_lookup_result))) - # A reverse lookup on the IP returns the autogenerated - # hostname. - reverse_lookup_result = self.dig_reverse_resolve(ip) - self.assertEqual( - [autogenerated_hostname], reverse_lookup_result, - "Failed to reverse resolve '%s' (results: '%s')." % ( - fqdn, ','.join(reverse_lookup_result))) - - def test_add_zone_loads_dns_zone(self): - nodegroup, node, lease = self.create_nodegroup_with_lease() - self.patch(settings, 'DNS_CONNECT', True) - dns.add_zone(nodegroup) - self.assertDNSMatches(node.hostname, nodegroup.name, lease.ip) - - def test_change_dns_zone_changes_dns_zone(self): - nodegroup, _, _ = self.create_nodegroup_with_lease() - self.patch(settings, 'DNS_CONNECT', True) - dns.write_full_dns_config() - nodegroup, new_node, new_lease = ( - self.create_nodegroup_with_lease( - nodegroup=nodegroup, lease_number=2)) - dns.change_dns_zones(nodegroup) - self.assertDNSMatches(new_node.hostname, nodegroup.name, new_lease.ip) - - def test_is_dns_enabled_return_false_if_DNS_CONNECT_False(self): - self.patch(settings, 'DNS_CONNECT', False) - self.assertFalse(dns.is_dns_enabled()) - - def test_is_dns_enabled_return_True_if_DNS_CONNECT_True(self): - self.patch(settings, 'DNS_CONNECT', True) - self.assertTrue(dns.is_dns_enabled()) - - def test_is_dns_in_use_return_False_no_configured_interface(self): - self.assertFalse(dns.is_dns_in_use()) - - def test_is_dns_in_use_return_True_if_configured_interface(self): - self.create_managed_nodegroup() - self.assertTrue(dns.is_dns_in_use()) - - def test_write_full_dns_loads_full_dns_config(self): - nodegroup, node, lease = self.create_nodegroup_with_lease() - self.patch(settings, 'DNS_CONNECT', True) - dns.write_full_dns_config() - self.assertDNSMatches(node.hostname, nodegroup.name, lease.ip) - - def test_write_full_dns_passes_reload_retry_parameter(self): - self.patch(settings, 'DNS_CONNECT', True) - recorder = FakeMethod() - self.create_managed_nodegroup() - - @task - def 
recorder_task(*args, **kwargs): - return recorder(*args, **kwargs) - self.patch(tasks, 'rndc_command', recorder_task) - dns.write_full_dns_config(reload_retry=True) - self.assertEqual( - ([(['reload'], True)]), recorder.extract_args()) - - def test_write_full_dns_passes_upstream_dns_parameter(self): - self.patch(settings, 'DNS_CONNECT', True) - self.create_managed_nodegroup() - random_ip = factory.getRandomIPAddress() - Config.objects.set_config("upstream_dns", random_ip) - patched_task = self.patch(dns.tasks.write_full_dns_config, "delay") - dns.write_full_dns_config() - self.assertThat(patched_task, MockCalledOnceWith( - zones=ANY, callback=ANY, upstream_dns=random_ip)) - - def test_write_full_dns_doesnt_call_task_it_no_interface_configured(self): - self.patch(settings, 'DNS_CONNECT', True) - patched_task = self.patch(dns.tasks.write_full_dns_config, "delay") - dns.write_full_dns_config() - self.assertEqual(0, patched_task.call_count) - - def test_dns_config_has_NS_record(self): - ip = factory.getRandomIPAddress() - self.patch(settings, 'DEFAULT_MAAS_URL', 'http://%s/' % ip) - nodegroup, node, lease = self.create_nodegroup_with_lease() - self.patch(settings, 'DNS_CONNECT', True) - dns.write_full_dns_config() - # Get the NS record for the zone 'nodegroup.name'. - ns_record = dig_call( - port=self.bind.config.port, - commands=[nodegroup.name, 'NS', '+short']) - # Resolve that hostname. 
- ip_of_ns_record = dig_call( - port=self.bind.config.port, commands=[ns_record, '+short']) - self.assertEqual(ip, ip_of_ns_record) - - def test_add_nodegroup_creates_DNS_zone(self): - self.patch(settings, "DNS_CONNECT", True) - network = IPNetwork('192.168.7.1/24') - ip = factory.getRandomIPInNetwork(network) - nodegroup = factory.make_node_group( - network=network, status=NODEGROUP_STATUS.ACCEPTED, - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) - self.assertDNSMatches(generated_hostname(ip), nodegroup.name, ip) - - def test_edit_nodegroupinterface_updates_DNS_zone(self): - self.patch(settings, "DNS_CONNECT", True) - old_network = IPNetwork('192.168.7.1/24') - old_ip = factory.getRandomIPInNetwork(old_network) - nodegroup = factory.make_node_group( - network=old_network, status=NODEGROUP_STATUS.ACCEPTED, - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) - [interface] = nodegroup.get_managed_interfaces() - # Edit nodegroup's network information to '192.168.44.1/24' - interface.ip = '192.168.44.7' - interface.router_ip = '192.168.44.14' - interface.broadcast_ip = '192.168.44.255' - interface.netmask = '255.255.255.0' - interface.ip_range_low = '192.168.44.0' - interface.ip_range_high = '192.168.44.255' - interface.save() - ip = factory.getRandomIPInNetwork(IPNetwork('192.168.44.1/24')) - # The ip from the old network does not resolve anymore. - self.assertEqual([''], self.dig_resolve(generated_hostname(old_ip))) - self.assertEqual([''], self.dig_reverse_resolve(old_ip)) - # The ip from the new network resolves. 
- self.assertDNSMatches(generated_hostname(ip), nodegroup.name, ip) - - def test_changing_interface_management_updates_DNS_zone(self): - self.patch(settings, "DNS_CONNECT", True) - network = IPNetwork('192.168.7.1/24') - ip = factory.getRandomIPInNetwork(network) - nodegroup = factory.make_node_group( - network=network, status=NODEGROUP_STATUS.ACCEPTED, - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) - [interface] = nodegroup.get_managed_interfaces() - interface.management = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED - interface.save() - self.assertEqual([''], self.dig_resolve(generated_hostname(ip))) - self.assertEqual([''], self.dig_reverse_resolve(ip)) - - def test_delete_nodegroup_disables_DNS_zone(self): - self.patch(settings, "DNS_CONNECT", True) - network = IPNetwork('192.168.7.1/24') - ip = factory.getRandomIPInNetwork(network) - nodegroup = factory.make_node_group( - network=network, status=NODEGROUP_STATUS.ACCEPTED, - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) - nodegroup.delete() - self.assertEqual([''], self.dig_resolve(generated_hostname(ip))) - self.assertEqual([''], self.dig_reverse_resolve(ip)) - - def test_add_node_updates_zone(self): - self.patch(settings, "DNS_CONNECT", True) - nodegroup, node, lease = self.create_nodegroup_with_lease() - self.assertDNSMatches(node.hostname, nodegroup.name, lease.ip) - - def test_delete_node_updates_zone(self): - self.patch(settings, "DNS_CONNECT", True) - nodegroup, node, lease = self.create_nodegroup_with_lease() - # Prevent omshell task dispatch. 
- self.patch(node_module, "remove_dhcp_host_map") - node.delete() - fqdn = "%s.%s" % (node.hostname, nodegroup.name) - self.assertEqual([''], self.dig_resolve(fqdn)) - - def test_change_node_hostname_updates_zone(self): - self.patch(settings, "DNS_CONNECT", True) - nodegroup, node, lease = self.create_nodegroup_with_lease() - node.hostname = factory.make_name('hostname') - node.save() - self.assertDNSMatches(node.hostname, nodegroup.name, lease.ip) - - def test_change_node_other_field_does_not_update_zone(self): - self.patch(settings, "DNS_CONNECT", True) - nodegroup, node, lease = self.create_nodegroup_with_lease() - recorder = FakeMethod() - self.patch(DNSZoneConfigBase, 'write_config', recorder) - node.error = factory.getRandomString() - node.save() - self.assertEqual(0, recorder.call_count) - - -def forward_zone(domain, *networks): - """ - Returns a matcher for a :class:`DNSForwardZoneConfig` with the given - domain and networks. - """ - networks = {IPNetwork(network) for network in networks} - return MatchesAll( - IsInstance(DNSForwardZoneConfig), - MatchesStructure.byEquality( - domain=domain, _networks=networks)) - - -def reverse_zone(domain, network): - """ - Returns a matcher for a :class:`DNSReverseZoneConfig` with the given - domain and network. 
- """ - network = network if network is None else IPNetwork(network) - return MatchesAll( - IsInstance(DNSReverseZoneConfig), - MatchesStructure.byEquality( - domain=domain, _network=network)) - - -class TestZoneGenerator(MAASServerTestCase): - """Tests for :class:x`dns.ZoneGenerator`.""" - - def make_node_group(self, **kwargs): - """Create an accepted nodegroup with a managed interface.""" - return factory.make_node_group( - status=NODEGROUP_STATUS.ACCEPTED, - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, **kwargs) - - def test_get_forward_nodegroups_returns_empty_for_unknown_domain(self): - self.assertEqual( - set(), - dns.ZoneGenerator._get_forward_nodegroups( - factory.make_name('domain'))) - - def test_get_forward_nodegroups_returns_empty_for_no_domains(self): - self.assertEqual(set(), dns.ZoneGenerator._get_forward_nodegroups([])) - - def test_get_forward_nodegroups_returns_dns_managed_nodegroups(self): - domain = factory.make_name('domain') - nodegroup = self.make_node_group(name=domain) - self.assertEqual( - {nodegroup}, - dns.ZoneGenerator._get_forward_nodegroups([domain])) - - def test_get_forward_nodegroups_includes_multiple_domains(self): - nodegroups = [self.make_node_group() for _ in range(3)] - self.assertEqual( - set(nodegroups), - dns.ZoneGenerator._get_forward_nodegroups( - [nodegroup.name for nodegroup in nodegroups])) - - def test_get_forward_nodegroups_ignores_non_dns_nodegroups(self): - domain = factory.make_name('domain') - managed_nodegroup = self.make_node_group(name=domain) - factory.make_node_group( - name=domain, status=NODEGROUP_STATUS.ACCEPTED, - management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) - factory.make_node_group( - name=domain, status=NODEGROUP_STATUS.ACCEPTED, - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) - self.assertEqual( - {managed_nodegroup}, - dns.ZoneGenerator._get_forward_nodegroups([domain])) - - def test_get_forward_nodegroups_ignores_other_domains(self): - nodegroups = [self.make_node_group() for _ in 
range(2)] - self.assertEqual( - {nodegroups[0]}, - dns.ZoneGenerator._get_forward_nodegroups([nodegroups[0].name])) - - def test_get_forward_nodegroups_ignores_unaccepted_nodegroups(self): - domain = factory.make_name('domain') - nodegroups = { - status: factory.make_node_group( - status=status, name=domain, - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) - for status in map_enum(NODEGROUP_STATUS).values() - } - self.assertEqual( - {nodegroups[NODEGROUP_STATUS.ACCEPTED]}, - dns.ZoneGenerator._get_forward_nodegroups([domain])) - - def test_get_reverse_nodegroups_returns_only_dns_managed_nodegroups(self): - nodegroups = { - management: factory.make_node_group( - status=NODEGROUP_STATUS.ACCEPTED, management=management) - for management in map_enum(NODEGROUPINTERFACE_MANAGEMENT).values() - } - self.assertEqual( - {nodegroups[NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS]}, - dns.ZoneGenerator._get_reverse_nodegroups(nodegroups.values())) - - def test_get_reverse_nodegroups_ignores_other_nodegroups(self): - nodegroups = [self.make_node_group() for _ in range(3)] - self.assertEqual( - {nodegroups[0]}, - dns.ZoneGenerator._get_reverse_nodegroups(nodegroups[:1])) - - def test_get_reverse_nodegroups_ignores_unaccepted_nodegroups(self): - nodegroups = { - status: factory.make_node_group( - status=status, - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) - for status in map_enum(NODEGROUP_STATUS).values() - } - self.assertEqual( - {nodegroups[NODEGROUP_STATUS.ACCEPTED]}, - dns.ZoneGenerator._get_reverse_nodegroups(nodegroups.values())) - - def test_get_networks_returns_network(self): - nodegroup = self.make_node_group() - [interface] = nodegroup.get_managed_interfaces() - networks_dict = dns.ZoneGenerator._get_networks() - retrieved_interface = networks_dict[nodegroup] - self.assertEqual([interface.network], retrieved_interface) - - def test_get_networks_returns_multiple_networks(self): - nodegroups = [self.make_node_group() for _ in range(3)] - networks_dict = 
dns.ZoneGenerator._get_networks() - for nodegroup in nodegroups: - [interface] = nodegroup.get_managed_interfaces() - self.assertEqual([interface.network], networks_dict[nodegroup]) - - def test_get_networks_returns_managed_networks(self): - nodegroups = [ - factory.make_node_group( - status=NODEGROUP_STATUS.ACCEPTED, management=management) - for management in map_enum(NODEGROUPINTERFACE_MANAGEMENT).values() - ] - networks_dict = dns.ZoneGenerator._get_networks() - # Force lazydict to evaluate for all these nodegroups. - for nodegroup in nodegroups: - networks_dict[nodegroup] - self.assertEqual( - { - nodegroup: [ - interface.network - for interface in nodegroup.get_managed_interfaces() - ] - for nodegroup in nodegroups - }, - networks_dict) - - def test_with_no_nodegroups_yields_nothing(self): - self.assertEqual([], dns.ZoneGenerator(()).as_list()) - - def test_with_one_nodegroup_yields_forward_and_reverse_zone(self): - nodegroup = self.make_node_group( - name="henry", network=IPNetwork("10/32")) - zones = dns.ZoneGenerator(nodegroup).as_list() - self.assertThat( - zones, MatchesListwise( - (forward_zone("henry", "10/32"), - reverse_zone("henry", "10/32")))) - - def test_two_managed_interfaces_yields_one_forward_two_reverse_zones(self): - nodegroup = self.make_node_group() - factory.make_node_group_interface( - nodegroup=nodegroup, - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) - [interface1, interface2] = nodegroup.get_managed_interfaces() - - expected_zones = [ - forward_zone( - nodegroup.name, interface1.network, interface2.network), - reverse_zone(nodegroup.name, interface1.network), - reverse_zone(nodegroup.name, interface2.network), - ] - self.assertThat( - dns.ZoneGenerator([nodegroup]).as_list(), - MatchesListwise(expected_zones)) - - def test_with_many_nodegroups_yields_many_zones(self): - # This demonstrates ZoneGenerator in all-singing all-dancing mode. 
- nodegroups = [ - self.make_node_group(name="one", network=IPNetwork("10/32")), - self.make_node_group(name="one", network=IPNetwork("11/32")), - self.make_node_group(name="two", network=IPNetwork("20/32")), - self.make_node_group(name="two", network=IPNetwork("21/32")), - ] - [ # Other nodegroups. - self.make_node_group(name="one", network=IPNetwork("12/32")), - self.make_node_group(name="two", network=IPNetwork("22/32")), - ] - expected_zones = ( - # For the forward zones, all nodegroups sharing a domain name, - # even those not passed into ZoneGenerator, are consolidated into - # a single forward zone description. - forward_zone("one", "10/32", "11/32", "12/32"), - forward_zone("two", "20/32", "21/32", "22/32"), - # For the reverse zones, a single reverse zone description is - # generated for each nodegroup passed in, in network order. - reverse_zone("one", "10/32"), - reverse_zone("one", "11/32"), - reverse_zone("two", "20/32"), - reverse_zone("two", "21/32"), - ) - self.assertThat( - dns.ZoneGenerator(nodegroups).as_list(), - MatchesListwise(expected_zones)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_event_connect.py maas-1.7.6+bzr3376/src/maasserver/tests/test_event_connect.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_event_connect.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_event_connect.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,81 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for node transition event.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + + +from maasserver.enum import NODE_STATUS_CHOICES_DICT +from maasserver.models import Event +from maasserver.node_status import ( + get_failed_status, + NODE_STATUS, + ) +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase +from provisioningserver.events import ( + EVENT_DETAILS, + EVENT_TYPES, + ) + + +class TestStatusTransitionEvent(MAASServerTestCase): + + def setUp(self): + super(TestStatusTransitionEvent, self).setUp() + # Circular imports. + from maasserver import event_connect + self.patch(event_connect, 'STATE_TRANSITION_EVENT_CONNECT', True) + + def test_changing_status_of_node_emits_event(self): + old_status = NODE_STATUS.COMMISSIONING + node = factory.make_Node(status=old_status) + node.status = get_failed_status(old_status) + node.save() + + latest_event = Event.objects.filter(node=node).last() + description = "From '%s' to '%s'" % ( + NODE_STATUS_CHOICES_DICT[old_status], + NODE_STATUS_CHOICES_DICT[node.status], + ) + self.assertEqual( + ( + EVENT_TYPES.NODE_CHANGED_STATUS, + EVENT_DETAILS[ + EVENT_TYPES.NODE_CHANGED_STATUS].description, + description, + ), + ( + latest_event.type.name, + latest_event.type.description, + latest_event.description, + )) + + def test_changing_to_allocated_includes_user_name(self): + old_status = NODE_STATUS.READY + user = factory.make_User() + node = factory.make_Node(status=old_status) + node.acquire(user) + + latest_event = Event.objects.filter(node=node).last() + description = "From '%s' to '%s' (to %s)" % ( + NODE_STATUS_CHOICES_DICT[old_status], + NODE_STATUS_CHOICES_DICT[node.status], + user.username, + ) + self.assertEqual( + EVENT_TYPES.NODE_CHANGED_STATUS, latest_event.type.name) + self.assertEqual( + EVENT_DETAILS[EVENT_TYPES.NODE_CHANGED_STATUS].description, + 
latest_event.type.description) + self.assertEqual(description, latest_event.description) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_eventloop.py maas-1.7.6+bzr3376/src/maasserver/tests/test_eventloop.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_eventloop.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_eventloop.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,14 +14,20 @@ __metaclass__ = type __all__ = [] -from maasserver import eventloop +from crochet import wait_for_reactor +from django.db import connections +from maasserver import ( + bootresources, + eventloop, + nonces_cleanup, + ) from maasserver.rpc import regionservice +from maasserver.testing.eventloop import RegionEventLoopFixture +from maasserver.utils.async import transactional +from maastesting.factory import factory from maastesting.testcase import MAASTestCase from testtools.matchers import IsInstance -from twisted.application.service import ( - MultiService, - Service, - ) +from twisted.python.threadable import isInIOThread class TestRegionEventLoop(MAASTestCase): @@ -33,20 +39,22 @@ def test_start_and_stop(self): # Replace the factories in RegionEventLoop with non-functional - # dummies to avoid bringing up real services here. - self.patch(eventloop.loop, "factories", tuple( - (name, Service) for name, _ in eventloop.loop.factories)) - # Reset the services list. - self.patch(eventloop.loop, "services", MultiService()) + # dummies to avoid bringing up real services here, and ensure + # that the services list is empty. + self.useFixture(RegionEventLoopFixture()) # At the outset, the eventloop's services are dorment. self.assertFalse(eventloop.loop.services.running) + # RegionEventLoop.running is an alias for .services.running. + self.assertFalse(eventloop.loop.running) self.assertEqual( set(eventloop.loop.services), set()) # After starting the loop, the services list is populated, and # the services are started too. 
eventloop.loop.start().wait(5) + self.addCleanup(lambda: eventloop.loop.reset().wait(5)) self.assertTrue(eventloop.loop.services.running) + self.assertTrue(eventloop.loop.running) self.assertEqual( {service.name for service in eventloop.loop.services}, {name for name, _ in eventloop.loop.factories}) @@ -59,16 +67,44 @@ # but the services are all stopped. eventloop.loop.stop().wait(5) self.assertFalse(eventloop.loop.services.running) + self.assertFalse(eventloop.loop.running) self.assertEqual( {service.name for service in eventloop.loop.services}, {name for name, _ in eventloop.loop.factories}) # The hook has been cleared. self.assertIsNone(eventloop.loop.handle) + def test_reset(self): + # Replace the factories in RegionEventLoop with non-functional + # dummies to avoid bringing up real services here, and ensure + # that the services list is empty. + self.useFixture(RegionEventLoopFixture()) + eventloop.loop.start().wait(5) + eventloop.loop.reset().wait(5) + # After stopping the loop, the services list is also emptied. + self.assertFalse(eventloop.loop.services.running) + self.assertFalse(eventloop.loop.running) + self.assertEqual( + set(eventloop.loop.services), + set()) + # The hook has also been cleared. + self.assertIsNone(eventloop.loop.handle) + + def test_reset_clears_factories(self): + eventloop.loop.factories = ( + (factory.make_name("service"), None), + ) + eventloop.loop.reset().wait(5) + # The loop's factories are also reset. + self.assertEqual( + eventloop.loop.__class__.factories, + eventloop.loop.factories) + def test_module_globals(self): # Several module globals are references to a shared RegionEventLoop. self.assertIs(eventloop.services, eventloop.loop.services) # Must compare by equality here; these methods are decorated. 
+ self.assertEqual(eventloop.reset, eventloop.loop.reset) self.assertEqual(eventloop.start, eventloop.loop.start) self.assertEqual(eventloop.stop, eventloop.loop.stop) @@ -91,3 +127,49 @@ self.assertIn( eventloop.make_RegionAdvertisingService, {factory for _, factory in eventloop.loop.factories}) + + def test_make_NonceCleanupService(self): + service = eventloop.make_NonceCleanupService() + self.assertThat(service, IsInstance( + nonces_cleanup.NonceCleanupService)) + # It is registered as a factory in RegionEventLoop. + self.assertIn( + eventloop.make_NonceCleanupService, + {factory for _, factory in eventloop.loop.factories}) + + def test_make_ImportResourcesService(self): + service = eventloop.make_ImportResourcesService() + self.assertThat(service, IsInstance( + bootresources.ImportResourcesService)) + # It is registered as a factory in RegionEventLoop. + self.assertIn( + eventloop.make_ImportResourcesService, + {factory for _, factory in eventloop.loop.factories}) + + +class TestDisablingDatabaseConnections(MAASTestCase): + + @wait_for_reactor + def test_connections_are_all_stubs_in_the_event_loop(self): + self.assertTrue(isInIOThread()) + for alias in connections: + connection = connections[alias] + # isinstance() fails because it references __bases__, so + # compare types here. 
+ self.assertEqual( + eventloop.DisabledDatabaseConnection, + type(connection)) + + @transactional + def test_connections_are_all_usable_outside_the_event_loop(self): + self.assertFalse(isInIOThread()) + for alias in connections: + connection = connections[alias] + self.assertTrue(connection.is_usable()) + + def test_DisabledDatabaseConnection(self): + connection = eventloop.DisabledDatabaseConnection() + self.assertRaises(RuntimeError, getattr, connection, "connect") + self.assertRaises(RuntimeError, getattr, connection, "__call__") + self.assertRaises(RuntimeError, setattr, connection, "foo", "bar") + self.assertRaises(RuntimeError, delattr, connection, "baz") diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_exceptions.py maas-1.7.6+bzr3376/src/maasserver/tests/test_exceptions.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_exceptions.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_exceptions.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the exceptions module.""" @@ -28,7 +28,7 @@ class TestExceptions(MAASTestCase): def test_MAASAPIException_produces_http_response(self): - error = factory.getRandomString() + error = factory.make_string() exception = MAASAPIBadRequest(error) response = exception.make_http_response() self.assertEqual( @@ -36,7 +36,7 @@ (response.status_code, response.content)) def test_Redirect_produces_redirect_to_given_URL(self): - target = factory.getRandomString() + target = factory.make_string() exception = Redirect(target) response = exception.make_http_response() self.assertEqual(target, extract_redirect(response)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_fields.py maas-1.7.6+bzr3376/src/maasserver/tests/test_fields.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_fields.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_fields.py 2015-07-10 01:27:14.000000000 +0000 @@ -16,6 +16,7 @@ import json from random import randint +import re from django.core import serializers from django.core.exceptions import ValidationError @@ -23,11 +24,19 @@ connection, DatabaseError, ) +from django.db.models import BinaryField +from maasserver.enum import NODEGROUPINTERFACE_MANAGEMENT from maasserver.fields import ( + EditableBinaryField, + IPListFormField, + LargeObjectField, + LargeObjectFile, MAC, NodeGroupFormField, register_mac_type, validate_mac, + VerboseRegexField, + VerboseRegexValidator, ) from maasserver.models import ( MACAddress, @@ -37,24 +46,29 @@ from maasserver.testing.testcase import MAASServerTestCase from maasserver.tests.models import ( JSONFieldModel, + LargeObjectFieldModel, + MAASIPAddressFieldModel, XMLFieldModel, ) from maastesting.djangotestcase import TestModelMixin +from maastesting.matchers import MockCalledOnceWith +from psycopg2 import OperationalError from psycopg2.extensions import ISQLQuote class TestNodeGroupFormField(MAASServerTestCase): def 
test_label_from_instance_tolerates_missing_interface(self): - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() nodegroup.nodegroupinterface_set.all().delete() self.assertEqual( nodegroup.name, NodeGroupFormField().label_from_instance(nodegroup)) def test_label_from_instance_shows_name_and_address(self): - nodegroup = factory.make_node_group() - [interface] = nodegroup.get_managed_interfaces() + nodegroup = factory.make_NodeGroup() + interface = factory.make_NodeGroupInterface( + nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) self.assertEqual( '%s: %s' % (nodegroup.name, interface.ip), NodeGroupFormField().label_from_instance(nodegroup)) @@ -67,41 +81,41 @@ [field.clean(spelling) for spelling in spellings_for_none]) def test_clean_accepts_nodegroup(self): - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() self.assertEqual(nodegroup, NodeGroupFormField().clean(nodegroup)) def test_clean_accepts_id_as_unicode(self): - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() self.assertEqual( nodegroup, NodeGroupFormField().clean("%s" % nodegroup.id)) def test_clean_accepts_id_as_bytes(self): - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() self.assertEqual( nodegroup, NodeGroupFormField().clean(("%s" % nodegroup.id).encode('ascii'))) def test_clean_accepts_uuid(self): - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() self.assertEqual( nodegroup, NodeGroupFormField().clean(nodegroup.uuid)) def test_clean_accepts_uuid_as_bytes(self): - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() self.assertEqual( nodegroup, NodeGroupFormField().clean(nodegroup.uuid.encode('ascii'))) def test_clean_accepts_cluster_name(self): - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() self.assertEqual( nodegroup, NodeGroupFormField().clean(nodegroup.cluster_name)) def 
test_clean_accepts_cluster_name_as_bytes(self): - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() self.assertEqual( nodegroup, NodeGroupFormField().clean(nodegroup.cluster_name.encode('ascii'))) @@ -110,7 +124,7 @@ # This cluster has a name that looks just like a number. Pick a number # that's highly unlikely to clash with the node's ID. cluster_name = '%s' % randint(1000000, 10000000) - nodegroup = factory.make_node_group(cluster_name=cluster_name) + nodegroup = factory.make_NodeGroup(cluster_name=cluster_name) self.assertEqual(nodegroup, NodeGroupFormField().clean(cluster_name)) def test_clean_rejects_unknown_nodegroup(self): @@ -129,22 +143,22 @@ self.assertIsNone(MAC(None).get_raw()) def test_get_raw_returns_wrapped_address(self): - addr = factory.getRandomMACAddress() + addr = factory.make_mac_address() self.assertEqual(addr, MAC(addr).get_raw()) def test_get_raw_punches_through_double_wrapping(self): - addr = factory.getRandomMACAddress() + addr = factory.make_mac_address() self.assertEqual(addr, MAC(MAC(addr)).get_raw()) def test_getquoted_returns_NULL_for_None(self): self.assertEqual("NULL", MAC(None).getquoted()) def test_getquoted_returns_SQL_for_MAC(self): - addr = factory.getRandomMACAddress() + addr = factory.make_mac_address() self.assertEqual("'%s'::macaddr" % addr, MAC(addr).getquoted()) def test_getquoted_punches_through_double_wrapping(self): - addr = factory.getRandomMACAddress() + addr = factory.make_mac_address() self.assertEqual("'%s'::macaddr" % addr, MAC(MAC(addr)).getquoted()) def test_mac_equals_self(self): @@ -152,7 +166,7 @@ self.assertTrue(mac == mac) def test_mac_equals_identical_mac(self): - addr = factory.getRandomMACAddress() + addr = factory.make_mac_address() self.assertTrue(MAC(addr) == MAC(addr)) def test_eq_punches_through_double_wrapping_on_self(self): @@ -203,13 +217,13 @@ self.assertItemsEqual(set([mac1, mac2]), [mac1, mac2]) def test_identical_macs_hash_identically(self): - addr = 
factory.getRandomMACAddress() + addr = factory.make_mac_address() self.assertItemsEqual( set([MAC(addr), MAC(addr), MAC(MAC(addr)), addr]), [addr]) def test_django_serializes_MAC_to_JSON(self): - mac = factory.make_mac_address() + mac = factory.make_MACAddress_with_Node() query = MACAddress.objects.filter(id=mac.id) output = serializers.serialize('json', query) self.assertIn(json.dumps(mac.mac_address.get_raw()), output) @@ -222,10 +236,39 @@ pass +class TestVerboseRegexValidator(MAASServerTestCase): + + def test_VerboseRegexValidator_validates_value(self): + validator = VerboseRegexValidator( + regex="test", message="Unknown value") + self.assertIsNone(validator('test')) + + def test_VerboseRegexValidator_validation_error_includes_value(self): + message = "Unknown value: %(value)s" + validator = VerboseRegexValidator(regex="test", message=message) + value = factory.make_name('value') + error = self.assertRaises(ValidationError, validator, value) + self.assertEqual(message % {'value': value}, error.message) + + +class TestVerboseRegexField(MAASServerTestCase): + + def test_VerboseRegexField_accepts_valid_value(self): + field = VerboseRegexField(regex="test", message="Unknown value") + self.assertEqual('test', field.clean('test')) + + def test_VerboseRegexField_validation_error_includes_value(self): + message = "Unknown value: %(value)s" + field = VerboseRegexField(regex="test", message=message) + value = factory.make_name('value') + error = self.assertRaises(ValidationError, field.clean, value) + self.assertEqual([message % {'value': value}], error.messages) + + class TestMACAddressField(MAASServerTestCase): def test_mac_address_is_stored_normalized_and_loaded(self): - stored_mac = factory.make_mac_address(' AA-bb-CC-dd-EE-Ff ') + stored_mac = factory.make_MACAddress_with_Node(' AA-bb-CC-dd-EE-Ff ') stored_mac.save() loaded_mac = MACAddress.objects.get(id=stored_mac.id) self.assertEqual('aa:bb:cc:dd:ee:ff', loaded_mac.mac_address) @@ -279,7 +322,7 @@ {"not": 5, 
"another": "test"}, ] for value in values: - name = factory.getRandomString() + name = factory.make_string() test_instance = JSONFieldModel(name=name, value=value) test_instance.save() @@ -309,21 +352,21 @@ app = 'maasserver.tests' def test_loads_string(self): - name = factory.getRandomString() + name = factory.make_string() value = "" XMLFieldModel.objects.create(name=name, value=value) instance = XMLFieldModel.objects.get(name=name) self.assertEqual(value, instance.value) def test_lookup_xpath_exists_result(self): - name = factory.getRandomString() + name = factory.make_string() XMLFieldModel.objects.create(name=name, value="") result = XMLFieldModel.objects.raw( "SELECT * FROM docs WHERE xpath_exists(%s, value)", ["//test"]) self.assertEqual(name, result[0].name) def test_lookup_xpath_exists_no_result(self): - name = factory.getRandomString() + name = factory.make_string() XMLFieldModel.objects.create(name=name, value="") result = XMLFieldModel.objects.raw( "SELECT * FROM docs WHERE xpath_exists(%s, value)", ["//miss"]) @@ -344,3 +387,171 @@ def test_lookup_exact_unsupported(self): self.assertRaises(TypeError, XMLFieldModel.objects.get, value="") + + +class TestEditableBinaryField(MAASServerTestCase): + + def test_is_BinaryField(self): + self.assertIsInstance(EditableBinaryField(), BinaryField) + + def test_is_editable(self): + self.assertTrue(EditableBinaryField().editable) + + +class TestMAASIPAddressField(TestModelMixin, MAASServerTestCase): + + app = 'maasserver.tests' + + def test_uses_ip_comparison(self): + ip_object = MAASIPAddressFieldModel.objects.create( + ip_address='192.0.2.99') + results = MAASIPAddressFieldModel.objects.filter( + ip_address__lte='192.0.2.100') + self.assertItemsEqual([ip_object], results) + + +class TestLargeObjectField(TestModelMixin, MAASServerTestCase): + + app = 'maasserver.tests' + + def test_stores_data(self): + data = factory.make_string() + test_name = factory.make_name('name') + test_instance = 
LargeObjectFieldModel(name=test_name) + large_object = LargeObjectFile() + with large_object.open('wb') as stream: + stream.write(data) + test_instance.large_object = large_object + test_instance.save() + test_instance = LargeObjectFieldModel.objects.get(name=test_name) + with test_instance.large_object.open('rb') as stream: + saved_data = stream.read() + self.assertEqual(data, saved_data) + + def test_with_exit_calls_close(self): + data = factory.make_string() + large_object = LargeObjectFile() + with large_object.open('wb') as stream: + self.addCleanup(large_object.close) + mock_close = self.patch(large_object, 'close') + stream.write(data) + self.assertThat(mock_close, MockCalledOnceWith()) + + def test_unlink(self): + data = factory.make_string() + large_object = LargeObjectFile() + with large_object.open('wb') as stream: + stream.write(data) + oid = large_object.oid + large_object.unlink() + self.assertEqual(0, large_object.oid) + self.assertRaises( + OperationalError, + connection.connection.lobject, oid) + + def test_interates_on_block_size(self): + # String size is multiple of block_size in the testing model + data = factory.make_string(10 * 2) + test_name = factory.make_name('name') + test_instance = LargeObjectFieldModel(name=test_name) + large_object = LargeObjectFile() + with large_object.open('wb') as stream: + stream.write(data) + test_instance.large_object = large_object + test_instance.save() + test_instance = LargeObjectFieldModel.objects.get(name=test_name) + with test_instance.large_object.open('rb') as stream: + offset = 0 + for block in stream: + self.assertEqual(data[offset:offset + 10], block) + offset += 10 + + def test_get_db_prep_value_returns_None_when_value_None(self): + field = LargeObjectField() + self.assertEqual(None, field.get_db_prep_value(None)) + + def test_get_db_prep_value_returns_oid_when_value_LargeObjectFile(self): + oid = randint(1, 100) + field = LargeObjectField() + obj_file = LargeObjectFile() + obj_file.oid = oid + 
self.assertEqual(oid, field.get_db_prep_value(obj_file)) + + def test_get_db_prep_value_raises_error_when_oid_less_than_zero(self): + oid = randint(-100, 0) + field = LargeObjectField() + obj_file = LargeObjectFile() + obj_file.oid = oid + self.assertRaises(AssertionError, field.get_db_prep_value, obj_file) + + def test_get_db_prep_value_raises_error_when_not_LargeObjectFile(self): + field = LargeObjectField() + self.assertRaises( + AssertionError, field.get_db_prep_value, factory.make_string()) + + def test_to_python_returns_None_when_value_None(self): + field = LargeObjectField() + self.assertEqual(None, field.to_python(None)) + + def test_to_python_returns_value_when_value_LargeObjectFile(self): + field = LargeObjectField() + obj_file = LargeObjectFile() + self.assertEqual(obj_file, field.to_python(obj_file)) + + def test_to_python_returns_LargeObjectFile_when_value_int(self): + oid = randint(1, 100) + field = LargeObjectField() + # South normally substitutes a FakeModel here, but with a baseline + # schema, we can skip the migration that creates LargeObjectField. + self.patch(field, 'model') + obj_file = field.to_python(oid) + self.assertEqual(oid, obj_file.oid) + + def test_to_python_returns_LargeObjectFile_when_value_long(self): + oid = long(randint(1, 100)) + field = LargeObjectField() + # South normally substitutes a FakeModel here, but with a baseline + # schema, we can skip the migration that creates LargeObjectField. 
+ self.patch(field, 'model') + obj_file = field.to_python(oid) + self.assertEqual(oid, obj_file.oid) + + def test_to_python_raises_error_when_not_valid_type(self): + field = LargeObjectField() + self.assertRaises( + AssertionError, field.to_python, factory.make_string()) + + +class IPListFormFieldTest(MAASServerTestCase): + + def test_accepts_none(self): + self.assertIsNone(IPListFormField().clean(None)) + + def test_accepts_single_ip(self): + ip = factory.make_ipv4_address() + self.assertEquals(ip, IPListFormField().clean(ip)) + + def test_accepts_space_separated_ips(self): + ips = [factory.make_ip_address() for _ in range(5)] + input = ' '.join(ips) + self.assertEquals(input, IPListFormField().clean(input)) + + def test_accepts_comma_separated_ips(self): + ips = [factory.make_ip_address() for _ in range(5)] + input = ','.join(ips) + self.assertEquals(' '.join(ips), IPListFormField().clean(input)) + + def test_rejects_invalid_input(self): + invalid = factory.make_name('invalid') + input = ' '.join([factory.make_ip_address(), invalid]) + error = self.assertRaises( + ValidationError, IPListFormField().clean, input) + self.assertIn("Invalid IP address: %s" % invalid, error.message) + + def test_separators_dont_conflict_with_ipv4_address(self): + self.assertIsNone(re.search( + IPListFormField.separators, factory.make_ipv4_address())) + + def test_separators_dont_conflict_with_ipv6_address(self): + self.assertIsNone(re.search( + IPListFormField.separators, factory.make_ipv6_address())) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_bootresource.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_bootresource.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_bootresource.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_bootresource.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,172 @@ +# Copyright 2014 Canonical Ltd. 
This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `BootSourceForm`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import random + +from django.core.files.uploadedfile import SimpleUploadedFile +from maasserver.enum import ( + BOOT_RESOURCE_FILE_TYPE, + BOOT_RESOURCE_TYPE, + ) +from maasserver.forms import BootResourceForm +from maasserver.models import BootResource +from maasserver.testing.architecture import make_usable_architecture +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.testcase import MAASServerTestCase + + +class TestBootResourceForm(MAASServerTestCase): + + def pick_filetype(self): + upload_type = random.choice([ + 'tgz', 'ddtgz']) + if upload_type == 'tgz': + filetype = BOOT_RESOURCE_FILE_TYPE.ROOT_TGZ + elif upload_type == 'ddtgz': + filetype = BOOT_RESOURCE_FILE_TYPE.ROOT_DD + return upload_type, filetype + + def test_creates_boot_resource(self): + name = factory.make_name('name') + title = factory.make_name('title') + architecture = make_usable_architecture(self) + subarch = architecture.split('/')[1] + upload_type, filetype = self.pick_filetype() + size = random.randint(1024, 2048) + content = factory.make_string(size).encode('utf-8') + upload_name = factory.make_name('filename') + uploaded_file = SimpleUploadedFile(content=content, name=upload_name) + data = { + 'name': name, + 'title': title, + 'architecture': architecture, + 'filetype': upload_type, + } + form = BootResourceForm(data=data, files={'content': uploaded_file}) + self.assertTrue(form.is_valid(), form._errors) + form.save() + resource = BootResource.objects.get( + rtype=BOOT_RESOURCE_TYPE.UPLOADED, + name=name, architecture=architecture) + resource_set = resource.sets.first() + rfile = resource_set.files.first() + self.assertEqual(title, 
resource.extra['title']) + self.assertEqual(subarch, resource.extra['subarches']) + self.assertTrue(filetype, rfile.filetype) + self.assertTrue(filetype, rfile.filename) + self.assertTrue(size, rfile.largefile.total_size) + with rfile.largefile.content.open('rb') as stream: + written_content = stream.read() + self.assertEqual(content, written_content) + + def test_adds_boot_resource_set_to_existing_boot_resource(self): + name = factory.make_name('name') + architecture = make_usable_architecture(self) + resource = factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.UPLOADED, + name=name, architecture=architecture) + upload_type, filetype = self.pick_filetype() + size = random.randint(1024, 2048) + content = factory.make_string(size).encode('utf-8') + upload_name = factory.make_name('filename') + uploaded_file = SimpleUploadedFile(content=content, name=upload_name) + data = { + 'name': name, + 'architecture': architecture, + 'filetype': upload_type, + } + form = BootResourceForm(data=data, files={'content': uploaded_file}) + self.assertTrue(form.is_valid(), form._errors) + form.save() + resource = reload_object(resource) + resource_set = resource.sets.order_by('id').last() + rfile = resource_set.files.first() + self.assertTrue(filetype, rfile.filetype) + self.assertTrue(filetype, rfile.filename) + self.assertTrue(size, rfile.largefile.total_size) + with rfile.largefile.content.open('rb') as stream: + written_content = stream.read() + self.assertEqual(content, written_content) + + def test_creates_boot_resoures_with_generated_rtype(self): + os = factory.make_name('os') + series = factory.make_name('series') + name = '%s/%s' % (os, series) + architecture = make_usable_architecture(self) + upload_type, filetype = self.pick_filetype() + size = random.randint(1024, 2048) + content = factory.make_string(size).encode('utf-8') + upload_name = factory.make_name('filename') + uploaded_file = SimpleUploadedFile(content=content, name=upload_name) + data = { + 'name': 
name, + 'architecture': architecture, + 'filetype': upload_type, + } + form = BootResourceForm(data=data, files={'content': uploaded_file}) + self.assertTrue(form.is_valid(), form._errors) + form.save() + resource = BootResource.objects.get( + rtype=BOOT_RESOURCE_TYPE.GENERATED, + name=name, architecture=architecture) + resource_set = resource.sets.first() + rfile = resource_set.files.first() + self.assertTrue(filetype, rfile.filetype) + self.assertTrue(filetype, rfile.filename) + self.assertTrue(size, rfile.largefile.total_size) + with rfile.largefile.content.open('rb') as stream: + written_content = stream.read() + self.assertEqual(content, written_content) + + def test_adds_boot_resource_set_to_existing_generated_boot_resource(self): + os = factory.make_name('os') + series = factory.make_name('series') + name = '%s/%s' % (os, series) + architecture = make_usable_architecture(self) + resource = factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.GENERATED, + name=name, architecture=architecture) + upload_type, filetype = self.pick_filetype() + size = random.randint(1024, 2048) + content = factory.make_string(size).encode('utf-8') + upload_name = factory.make_name('filename') + uploaded_file = SimpleUploadedFile(content=content, name=upload_name) + data = { + 'name': name, + 'architecture': architecture, + 'filetype': upload_type, + } + form = BootResourceForm(data=data, files={'content': uploaded_file}) + self.assertTrue(form.is_valid(), form._errors) + form.save() + resource = reload_object(resource) + resource_set = resource.sets.order_by('id').last() + rfile = resource_set.files.first() + self.assertTrue(filetype, rfile.filetype) + self.assertTrue(filetype, rfile.filename) + self.assertTrue(size, rfile.largefile.total_size) + with rfile.largefile.content.open('rb') as stream: + written_content = stream.read() + self.assertEqual(content, written_content) + + def test_requires_fields(self): + form = BootResourceForm(data={}) + 
self.assertFalse(form.is_valid(), form.errors) + self.assertItemsEqual([ + 'name', 'architecture', 'filetype', 'content', + ], + form.errors.keys()) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_bootsource.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_bootsource.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_bootsource.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_bootsource.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,66 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `BootSourceForm`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from cStringIO import StringIO + +from django.core.files.uploadedfile import InMemoryUploadedFile +from maasserver.forms import BootSourceForm +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.testcase import MAASServerTestCase +from maastesting.utils import sample_binary_data + + +class TestBootSourceForm(MAASServerTestCase): + """Tests for `BootSourceForm`.""" + + def test_edits_boot_source_object(self): + boot_source = factory.make_BootSource() + params = { + 'url': 'http://example.com/', + 'keyring_filename': factory.make_name('keyring_filename'), + } + form = BootSourceForm(instance=boot_source, data=params) + self.assertTrue(form.is_valid(), form._errors) + form.save() + boot_source = reload_object(boot_source) + self.assertAttributes(boot_source, params) + + def test_creates_boot_source_object_with_keyring_filename(self): + params = { + 'url': 'http://example.com/', + 'keyring_filename': factory.make_name('keyring_filename'), + } + form = BootSourceForm(data=params) + self.assertTrue(form.is_valid(), form._errors) + boot_source = form.save() + 
self.assertAttributes(boot_source, params) + + def test_creates_boot_source_object_with_keyring_data(self): + in_mem_file = InMemoryUploadedFile( + StringIO(sample_binary_data), name=factory.make_name('name'), + field_name=factory.make_name('field-name'), + content_type='application/octet-stream', + size=len(sample_binary_data), + charset=None) + params = {'url': 'http://example.com/'} + form = BootSourceForm( + data=params, + files={'keyring_data': in_mem_file}) + self.assertTrue(form.is_valid(), form._errors) + boot_source = form.save() + self.assertEqual(sample_binary_data, bytes(boot_source.keyring_data)) + self.assertAttributes(boot_source, params) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_bootsourceselection.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_bootsourceselection.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_bootsourceselection.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_bootsourceselection.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,291 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `BootSourceSelectionForm`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from django.core.exceptions import ValidationError +from maasserver.forms import BootSourceSelectionForm +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.testcase import MAASServerTestCase + + +class TestBootSourceSelectionForm(MAASServerTestCase): + """Tests for `BootSourceSelectionForm`.""" + + def make_valid_source_selection_params(self, boot_source=None): + # Helper that creates a valid BootSourceCache and parameters for + # a BootSourceSelectionForm that will validate against the + # cache. 
+ if boot_source is None: + boot_source = factory.make_BootSource() + arch = factory.make_name('arch') + arch2 = factory.make_name('arch') + subarch = factory.make_name('subarch') + subarch2 = factory.make_name('subarch') + label = factory.make_name('label') + label2 = factory.make_name('label') + params = { + 'os': factory.make_name('os'), + 'release': factory.make_name('release'), + 'arches': [arch, arch2], + 'subarches': [subarch, subarch2], + 'labels': [label, label2], + } + factory.make_BootSourceCache( + boot_source=boot_source, + os=params['os'], + release=params['release'], + arch=arch, + subarch=subarch, + label=label, + ) + factory.make_BootSourceCache( + boot_source=boot_source, + os=params['os'], + release=params['release'], + arch=arch2, + subarch=subarch2, + label=label2, + ) + return params + + def test_edits_boot_source_selection_object(self): + boot_source_selection = factory.make_BootSourceSelection() + boot_source = boot_source_selection.boot_source + params = self.make_valid_source_selection_params(boot_source) + form = BootSourceSelectionForm( + instance=boot_source_selection, data=params) + self.assertTrue(form.is_valid(), form._errors) + form.save() + boot_source_selection = reload_object(boot_source_selection) + self.assertAttributes(boot_source_selection, params) + + def test_creates_boot_source_selection_object(self): + boot_source = factory.make_BootSource() + params = self.make_valid_source_selection_params(boot_source) + form = BootSourceSelectionForm(boot_source=boot_source, data=params) + self.assertTrue(form.is_valid(), form._errors) + boot_source_selection = form.save() + self.assertAttributes(boot_source_selection, params) + + def test_cannot_create_duplicate_entry(self): + boot_source = factory.make_BootSource() + params = self.make_valid_source_selection_params(boot_source) + form = BootSourceSelectionForm( + boot_source=boot_source, data=params) + self.assertTrue(form.is_valid(), form._errors) + form.save() + + # Duplicates 
should be detected for the same boot_source, os and + # release, the other fields are irrelevant. + dup_params = { + 'os': params['os'], + 'release': params['release'], + } + form = BootSourceSelectionForm( + boot_source=boot_source, data=dup_params) + self.assertRaises(ValidationError, form.save) + + def test_validates_if_boot_source_cache_has_same_os_and_release(self): + boot_source = factory.make_BootSource() + boot_cache = factory.make_BootSourceCache(boot_source) + + params = { + 'os': boot_cache.os, + 'release': boot_cache.release, + } + form = BootSourceSelectionForm(boot_source=boot_source, data=params) + self.assertTrue(form.is_valid(), form._errors) + + def test_rejects_if_boot_source_cache_has_different_os(self): + boot_source = factory.make_BootSource() + boot_cache = factory.make_BootSourceCache(boot_source) + + params = { + 'os': factory.make_name('os'), + 'release': boot_cache.release, + } + form = BootSourceSelectionForm(boot_source=boot_source, data=params) + self.assertFalse(form.is_valid()) + self.assertEqual( + { + "os": [ + "OS %s with release %s has no available images " + "for download" % (params['os'], boot_cache.release) + ] + }, + form._errors) + + def test_rejects_if_boot_source_cache_has_different_release(self): + boot_source = factory.make_BootSource() + boot_cache = factory.make_BootSourceCache(boot_source) + + params = { + 'os': boot_cache.os, + 'release': factory.make_name('release'), + } + form = BootSourceSelectionForm(boot_source=boot_source, data=params) + self.assertFalse(form.is_valid()) + self.assertEqual( + { + "os": [ + "OS %s with release %s has no available images " + "for download" % (boot_cache.os, params['release']) + ] + }, + form._errors) + + def make_some_caches(self, boot_source, os, release): + # Make a few BootSourceCache records that the following tests can use + # to validate against when using BootSourceSelectionForm. 
+ return factory.make_many_BootSourceCaches( + 3, boot_source=boot_source, os=os, release=release) + + def test_validates_if_boot_source_cache_has_arch(self): + boot_source = factory.make_BootSource() + os = factory.make_name('os') + release = factory.make_name('release') + boot_caches = self.make_some_caches(boot_source, os, release) + + # Request arches that are in two of the cache records. + params = { + 'os': os, + 'release': release, + 'arches': [boot_caches[0].arch, boot_caches[2].arch], + } + + form = BootSourceSelectionForm(boot_source=boot_source, data=params) + self.assertTrue(form.is_valid(), form._errors) + + def test_rejects_if_boot_source_cache_does_not_have_arch(self): + boot_source = factory.make_BootSource() + os = factory.make_name('os') + release = factory.make_name('release') + factory.make_BootSourceCache( + boot_source, os=os, release=release) + + params = { + 'os': os, + 'release': release, + 'arches': [factory.make_name('arch')], + } + + form = BootSourceSelectionForm(boot_source=boot_source, data=params) + self.assertFalse(form.is_valid()) + self.assertEqual( + { + "arches": [ + "No available images to download for %s" % + params['arches'] + ] + }, + form._errors) + + def test_validates_if_boot_source_cache_has_subarch(self): + boot_source = factory.make_BootSource() + os = factory.make_name('os') + release = factory.make_name('release') + boot_caches = self.make_some_caches(boot_source, os, release) + + # Request subarches that are in two of the cache records. 
+ params = { + 'os': os, + 'release': release, + 'subarches': [boot_caches[0].subarch, boot_caches[2].subarch], + } + + form = BootSourceSelectionForm(boot_source=boot_source, data=params) + self.assertTrue(form.is_valid(), form._errors) + + def test_rejects_if_boot_source_cache_does_not_have_subarch(self): + boot_source = factory.make_BootSource() + os = factory.make_name('os') + release = factory.make_name('release') + factory.make_BootSourceCache( + boot_source, os=os, release=release) + + params = { + 'os': os, + 'release': release, + 'subarches': [factory.make_name('subarch')], + } + + form = BootSourceSelectionForm(boot_source=boot_source, data=params) + self.assertFalse(form.is_valid()) + self.assertEqual( + { + "subarches": [ + "No available images to download for %s" % + params['subarches'] + ] + }, + form._errors) + + def test_validates_if_boot_source_cache_has_label(self): + boot_source = factory.make_BootSource() + os = factory.make_name('os') + release = factory.make_name('release') + boot_caches = self.make_some_caches(boot_source, os, release) + + # Request labels that are in two of the cache records. 
+ params = { + 'os': os, + 'release': release, + 'labels': [boot_caches[0].label, boot_caches[2].label], + } + + form = BootSourceSelectionForm(boot_source=boot_source, data=params) + self.assertTrue(form.is_valid(), form._errors) + + def test_rejects_if_boot_source_cache_does_not_have_label(self): + boot_source = factory.make_BootSource() + os = factory.make_name('os') + release = factory.make_name('release') + factory.make_BootSourceCache( + boot_source, os=os, release=release) + + params = { + 'os': os, + 'release': release, + 'labels': [factory.make_name('label')], + } + + form = BootSourceSelectionForm(boot_source=boot_source, data=params) + self.assertFalse(form.is_valid()) + self.assertEqual( + { + "labels": [ + "No available images to download for %s" % + params['labels'] + ] + }, + form._errors) + + def test_star_values_in_request_validate_against_any_cache(self): + boot_source = factory.make_BootSource() + os = factory.make_name('os') + release = factory.make_name('release') + factory.make_BootSourceCache( + boot_source, os=os, release=release) + params = { + 'os': os, + 'release': release, + 'arches': ['*'], + 'subarches': ['*'], + 'labels': ['*'], + } + + form = BootSourceSelectionForm(boot_source=boot_source, data=params) + self.assertTrue(form.is_valid(), form._errors) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_bulknodeaction.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_bulknodeaction.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_bulknodeaction.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_bulknodeaction.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,250 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for `BulkNodeActionForm`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maasserver.enum import NODE_STATUS +from maasserver.exceptions import NodeActionError +from maasserver.forms import ( + BulkNodeActionForm, + SetZoneBulkAction, + ) +from maasserver.models import Node +from maasserver.node_action import ( + Delete, + StartNode, + StopNode, + ) +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.testcase import MAASServerTestCase + + +class TestBulkNodeActionForm(MAASServerTestCase): + + def test_performs_action(self): + node1 = factory.make_Node() + node2 = factory.make_Node() + node3 = factory.make_Node() + system_id_to_delete = [node1.system_id, node2.system_id] + form = BulkNodeActionForm( + user=factory.make_admin(), + data=dict( + action=Delete.name, + system_id=system_id_to_delete)) + self.assertTrue(form.is_valid(), form._errors) + done, not_actionable, not_permitted = form.save() + existing_nodes = list(Node.objects.filter( + system_id__in=system_id_to_delete)) + node3_system_id = reload_object(node3).system_id + self.assertEqual( + [2, 0, 0], + [done, not_actionable, not_permitted]) + self.assertEqual( + [[], node3.system_id], + [existing_nodes, node3_system_id]) + + def test_perform_action_catches_start_action_errors(self): + error_text = factory.make_string(prefix="NodeActionError") + exc = NodeActionError(error_text) + self.patch(StartNode, "execute").side_effect = exc + user = factory.make_User() + factory.make_SSHKey(user) + node = factory.make_Node(status=NODE_STATUS.READY, owner=user) + form = BulkNodeActionForm( + user=user, + data=dict( + action=StartNode.name, + system_id=[node.system_id])) + + self.assertTrue(form.is_valid(), form._errors) + done, not_actionable, not_permitted = form.save() + self.assertEqual( + [0, 1, 0], + [done, not_actionable, 
not_permitted]) + + def test_first_action_is_empty(self): + form = BulkNodeActionForm(user=factory.make_admin()) + action = form.fields['action'] + default_action = action.choices[0][0] + required = action.required + # The default action is the empty string (i.e. no action) + # and it's a required field. + self.assertEqual(('', True), (default_action, required)) + + def test_admin_is_offered_bulk_node_change(self): + form = BulkNodeActionForm(user=factory.make_admin()) + choices = form.fields['action'].choices + self.assertNotEqual( + [], + [choice for choice in choices if choice[0] == 'set_zone']) + + def test_nonadmin_is_not_offered_bulk_node_change(self): + form = BulkNodeActionForm(user=factory.make_User()) + choices = form.fields['action'].choices + self.assertEqual( + [], + [choice for choice in choices if choice[0] == 'set_zone']) + + def test_gives_stat_when_not_applicable(self): + node1 = factory.make_Node(status=NODE_STATUS.NEW) + node2 = factory.make_Node(status=NODE_STATUS.FAILED_COMMISSIONING) + system_id_for_action = [node1.system_id, node2.system_id] + form = BulkNodeActionForm( + user=factory.make_admin(), + data=dict( + action=StartNode.name, + system_id=system_id_for_action)) + self.assertTrue(form.is_valid(), form._errors) + done, not_actionable, not_permitted = form.save() + self.assertEqual( + [0, 2, 0], + [done, not_actionable, not_permitted]) + + def test_gives_stat_when_no_permission(self): + user = factory.make_User() + node = factory.make_Node( + status=NODE_STATUS.DEPLOYED, owner=factory.make_User()) + system_id_for_action = [node.system_id] + form = BulkNodeActionForm( + user=user, + data=dict( + action=StopNode.name, + system_id=system_id_for_action)) + self.assertTrue(form.is_valid(), form._errors) + done, not_actionable, not_permitted = form.save() + self.assertEqual( + [0, 0, 1], + [done, not_actionable, not_permitted]) + + def test_gives_stat_when_action_is_inhibited(self): + node = factory.make_Node( + status=NODE_STATUS.ALLOCATED, 
owner=factory.make_User()) + form = BulkNodeActionForm( + user=factory.make_admin(), + data=dict( + action=StartNode.name, + system_id=[node.system_id])) + self.assertTrue(form.is_valid(), form._errors) + done, not_actionable, not_permitted = form.save() + self.assertEqual( + [0, 1, 0], + [done, not_actionable, not_permitted]) + + def test_rejects_empty_system_ids(self): + form = BulkNodeActionForm( + user=factory.make_admin(), + data=dict(action=Delete.name, system_id=[])) + self.assertFalse(form.is_valid(), form._errors) + self.assertEqual( + ["No node selected."], + form._errors['system_id']) + + def test_rejects_invalid_system_ids(self): + node = factory.make_Node() + system_id_to_delete = [node.system_id, "wrong-system_id"] + form = BulkNodeActionForm( + user=factory.make_admin(), + data=dict( + action=Delete.name, + system_id=system_id_to_delete)) + self.assertFalse(form.is_valid(), form._errors) + self.assertEqual( + ["Some of the given system ids are invalid system ids."], + form._errors['system_id']) + + def test_rejects_if_no_action(self): + form = BulkNodeActionForm( + user=factory.make_admin(), + data=dict(system_id=[factory.make_Node().system_id])) + self.assertFalse(form.is_valid(), form._errors) + + def test_rejects_if_invalid_action(self): + form = BulkNodeActionForm( + user=factory.make_admin(), + data=dict( + action="invalid-action", + system_id=[factory.make_Node().system_id])) + self.assertFalse(form.is_valid(), form._errors) + + def test_set_zone_sets_zone_on_node(self): + node = factory.make_Node() + zone = factory.make_Zone() + form = BulkNodeActionForm( + user=factory.make_admin(), + data={ + 'action': 'set_zone', + 'zone': zone.name, + 'system_id': [node.system_id], + }) + self.assertTrue(form.is_valid(), form._errors) + self.assertEqual((1, 0, 0), form.save()) + node = reload_object(node) + self.assertEqual(zone, node.zone) + + def test_set_zone_does_not_work_if_not_admin(self): + node = factory.make_Node() + form = BulkNodeActionForm( + 
user=factory.make_User(), + data={ + 'action': SetZoneBulkAction.name, + 'zone': factory.make_Zone().name, + 'system_id': [node.system_id], + }) + self.assertFalse(form.is_valid()) + self.assertIn( + "Select a valid choice. " + "set_zone is not one of the available choices.", + form._errors['action']) + + def test_zone_field_rejects_empty_zone(self): + # If the field is present, the zone name has to be valid + # and the empty string is not a valid zone name. + form = BulkNodeActionForm( + user=factory.make_admin(), + data={ + 'action': SetZoneBulkAction.name, + 'zone': '', + }) + self.assertFalse(form.is_valid(), form._errors) + self.assertEqual( + ["This field is required."], + form._errors['zone']) + + def test_zone_field_present_if_data_is_empty(self): + form = BulkNodeActionForm( + user=factory.make_admin(), + data={}) + self.assertIn('zone', form.fields) + + def test_zone_field_not_present_action_is_not_SetZoneBulkAction(self): + form = BulkNodeActionForm( + user=factory.make_admin(), + data={'action': factory.make_name('action')}) + self.assertNotIn('zone', form.fields) + + def test_set_zone_leaves_unselected_nodes_alone(self): + unselected_node = factory.make_Node() + original_zone = unselected_node.zone + form = BulkNodeActionForm( + user=factory.make_admin(), + data={ + 'action': SetZoneBulkAction.name, + 'zone': factory.make_Zone().name, + 'system_id': [factory.make_Node().system_id], + }) + self.assertTrue(form.is_valid(), form._errors) + self.assertEqual((1, 0, 0), form.save()) + unselected_node = reload_object(unselected_node) + self.assertEqual(original_zone, unselected_node.zone) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_commissioning.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_commissioning.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_commissioning.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_commissioning.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,82 
@@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for commissioning forms.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from django.core.files.uploadedfile import SimpleUploadedFile +from maasserver.forms import ( + CommissioningForm, + CommissioningScriptForm, + ) +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase +from maasserver.utils.forms import compose_invalid_choice_text +from metadataserver.models import CommissioningScript +from testtools.matchers import MatchesStructure + + +class TestCommissioningFormForm(MAASServerTestCase): + + def test_commissioningform_error_msg_lists_series_choices(self): + form = CommissioningForm() + field = form.fields['commissioning_distro_series'] + self.assertEqual( + compose_invalid_choice_text( + 'commissioning_distro_series', field.choices), + field.error_messages['invalid_choice']) + + +class TestCommissioningScriptForm(MAASServerTestCase): + + def test_creates_commissioning_script(self): + content = factory.make_string().encode('ascii') + name = factory.make_name('filename') + uploaded_file = SimpleUploadedFile(content=content, name=name) + form = CommissioningScriptForm(files={'content': uploaded_file}) + self.assertTrue(form.is_valid(), form._errors) + form.save() + new_script = CommissioningScript.objects.get(name=name) + self.assertThat( + new_script, + MatchesStructure.byEquality(name=name, content=content)) + + def test_raises_if_duplicated_name(self): + content = factory.make_string().encode('ascii') + name = factory.make_name('filename') + factory.make_CommissioningScript(name=name) + uploaded_file = SimpleUploadedFile(content=content, name=name) + form = CommissioningScriptForm(files={'content': uploaded_file}) + self.assertEqual( + (False, {'content': 
["A script with that name already exists."]}), + (form.is_valid(), form._errors)) + + def test_rejects_whitespace_in_name(self): + name = factory.make_name('with space') + content = factory.make_string().encode('ascii') + uploaded_file = SimpleUploadedFile(content=content, name=name) + form = CommissioningScriptForm(files={'content': uploaded_file}) + self.assertFalse(form.is_valid()) + self.assertEqual( + ["Name contains disallowed characters (e.g. space or quotes)."], + form._errors['content']) + + def test_rejects_quotes_in_name(self): + name = factory.make_name("l'horreur") + content = factory.make_string().encode('ascii') + uploaded_file = SimpleUploadedFile(content=content, name=name) + form = CommissioningScriptForm(files={'content': uploaded_file}) + self.assertFalse(form.is_valid()) + self.assertEqual( + ["Name contains disallowed characters (e.g. space or quotes)."], + form._errors['content']) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_config.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_config.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_config.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_config.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,78 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for `ConfigForm`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from django import forms +from maasserver.forms import ConfigForm +from maasserver.models import Config +from maasserver.models.config import DEFAULT_CONFIG +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase + + +class TestOptionForm(ConfigForm): + field1 = forms.CharField(label="Field 1", max_length=10) + field2 = forms.BooleanField(label="Field 2", required=False) + + +class TestValidOptionForm(ConfigForm): + maas_name = forms.CharField(label="Field 1", max_length=10) + + +class ConfigFormTest(MAASServerTestCase): + + def test_form_valid_saves_into_db(self): + value = factory.make_string(10) + form = TestValidOptionForm({'maas_name': value}) + result = form.save() + + self.assertTrue(result) + self.assertEqual(value, Config.objects.get_config('maas_name')) + + def test_form_rejects_unknown_settings(self): + value = factory.make_string(10) + value2 = factory.make_string(10) + form = TestOptionForm({'field1': value, 'field2': value2}) + valid = form.is_valid() + + self.assertFalse(valid) + self.assertIn('field1', form._errors) + self.assertIn('field2', form._errors) + + def test_form_invalid_does_not_save_into_db(self): + value_too_long = factory.make_string(20) + form = TestOptionForm({'field1': value_too_long, 'field2': False}) + result = form.save() + + self.assertFalse(result) + self.assertIn('field1', form._errors) + self.assertIsNone(Config.objects.get_config('field1')) + self.assertIsNone(Config.objects.get_config('field2')) + + def test_form_loads_initial_values(self): + value = factory.make_string() + Config.objects.set_config('field1', value) + form = TestOptionForm() + + self.assertItemsEqual(['field1'], form.initial) + self.assertEqual(value, form.initial['field1']) + + def 
test_form_loads_initial_values_from_default_value(self): + value = factory.make_string() + DEFAULT_CONFIG['field1'] = value + form = TestOptionForm() + + self.assertItemsEqual(['field1'], form.initial) + self.assertEqual(value, form.initial['field1']) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_deploy.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_deploy.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_deploy.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_deploy.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,49 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `DeployForm`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maasserver.forms import DeployForm +from maasserver.testing.osystems import make_usable_osystem +from maasserver.testing.testcase import MAASServerTestCase + + +class TestDeployForm(MAASServerTestCase): + """Tests for `DeployForm`.""" + + def test_uses_live_data(self): + # The DeployForm uses the database rather than just relying on + # hard-coded stuff. 
+ osystem = make_usable_osystem(self) + os_name = osystem['name'] + release_name = osystem['default_release'] + release_name = "%s/%s" % (os_name, release_name) + deploy_form = DeployForm() + os_choices = deploy_form.fields['default_osystem'].choices + os_names = [name for name, title in os_choices] + release_choices = deploy_form.fields['default_distro_series'].choices + release_names = [name for name, title in release_choices] + self.assertIn(os_name, os_names) + self.assertIn(release_name, release_names) + + def test_accepts_new_values(self): + osystem = make_usable_osystem(self) + os_name = osystem['name'] + release_name = osystem['default_release'] + params = { + 'default_osystem': os_name, + 'default_distro_series': "%s/%s" % (os_name, release_name), + } + form = DeployForm(data=params) + self.assertTrue(form.is_valid()) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_downloadprogress.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_downloadprogress.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_downloadprogress.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_downloadprogress.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,73 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for `DownloadProgressForm`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maasserver.forms import DownloadProgressForm +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.testcase import MAASServerTestCase + + +class TestDownloadProgressForm(MAASServerTestCase): + + def test_updates_instance(self): + progress = factory.make_DownloadProgress_incomplete(size=None) + new_bytes_downloaded = progress.bytes_downloaded + 1 + size = progress.bytes_downloaded + 2 + error = factory.make_string() + + form = DownloadProgressForm( + data={ + 'size': size, + 'bytes_downloaded': new_bytes_downloaded, + 'error': error, + }, + instance=progress) + new_progress = form.save() + + progress = reload_object(progress) + self.assertEqual(progress, new_progress) + self.assertEqual(size, progress.size) + self.assertEqual(new_bytes_downloaded, progress.bytes_downloaded) + self.assertEqual(error, progress.error) + + def test_rejects_unknown_ongoing_download(self): + form = DownloadProgressForm( + data={'bytes_downloaded': 1}, instance=None) + + self.assertFalse(form.is_valid()) + + def test_get_download_returns_ongoing_download(self): + progress = factory.make_DownloadProgress_incomplete() + + self.assertEqual( + progress, + DownloadProgressForm.get_download( + progress.nodegroup, progress.filename, + progress.bytes_downloaded + 1)) + + def test_get_download_recognises_start_of_new_download(self): + nodegroup = factory.make_NodeGroup() + filename = factory.make_string() + progress = DownloadProgressForm.get_download(nodegroup, filename, None) + self.assertIsNotNone(progress) + self.assertEqual(nodegroup, progress.nodegroup) + self.assertEqual(filename, progress.filename) + self.assertIsNone(progress.bytes_downloaded) + + def test_get_download_returns_none_for_unknown_ongoing_download(self): + 
self.assertIsNone( + DownloadProgressForm.get_download( + factory.make_NodeGroup(), factory.make_string(), 1)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_helpers.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_helpers.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_helpers.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_helpers.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,171 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for forms helpers.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maasserver.enum import ( + BOOT_RESOURCE_TYPE, + NODE_STATUS, + ) +from maasserver.forms import ( + AdminNodeForm, + AdminNodeWithMACAddressesForm, + get_node_create_form, + get_node_edit_form, + initialize_node_group, + list_all_usable_architectures, + MAASModelForm, + NodeForm, + NodeWithMACAddressesForm, + pick_default_architecture, + remove_None_values, + ) +from maasserver.models import ( + Node, + NodeGroup, + ) +from maasserver.testing.architecture import make_usable_architecture +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase + + +class TestHelpers(MAASServerTestCase): + + def make_usable_boot_resource(self, arch=None, subarch=None): + """Create a set of boot resources, so the architecture becomes usable. + + This will make the resources' architecture show up in the list of + usable architectures. 
+ """ + if arch is None: + arch = factory.make_name('arch') + if subarch is None: + subarch = factory.make_name('subarch') + for purpose in ['install', 'commissioning']: + architecture = '%s/%s' % (arch, subarch) + factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, architecture=architecture) + + def test_initialize_node_group_leaves_nodegroup_reference_intact(self): + preselected_nodegroup = factory.make_NodeGroup() + node = factory.make_Node(nodegroup=preselected_nodegroup) + initialize_node_group(node) + self.assertEqual(preselected_nodegroup, node.nodegroup) + + def test_initialize_node_group_initializes_nodegroup_to_form_value(self): + node = Node( + NODE_STATUS.NEW, architecture=make_usable_architecture(self)) + nodegroup = factory.make_NodeGroup() + initialize_node_group(node, nodegroup) + self.assertEqual(nodegroup, node.nodegroup) + + def test_initialize_node_group_defaults_to_master(self): + node = Node( + NODE_STATUS.NEW, + architecture=make_usable_architecture(self)) + initialize_node_group(node) + self.assertEqual(NodeGroup.objects.ensure_master(), node.nodegroup) + + def test_list_all_usable_architectures_combines_nodegroups(self): + arches = [ + (factory.make_name('arch'), factory.make_name('subarch')) + for _ in range(3)] + for arch, subarch in arches: + self.make_usable_boot_resource(arch=arch, subarch=subarch) + expected = [ + "%s/%s" % (arch, subarch) for arch, subarch in arches] + self.assertItemsEqual(expected, list_all_usable_architectures()) + + def test_list_all_usable_architectures_sorts_output(self): + arches = [ + (factory.make_name('arch'), factory.make_name('subarch')) + for _ in range(3)] + for arch, subarch in arches: + self.make_usable_boot_resource(arch=arch, subarch=subarch) + expected = [ + "%s/%s" % (arch, subarch) for arch, subarch in arches] + self.assertEqual(sorted(expected), list_all_usable_architectures()) + + def test_list_all_usable_architectures_returns_no_duplicates(self): + arch = 
factory.make_name('arch') + subarch = factory.make_name('subarch') + self.make_usable_boot_resource(arch=arch, subarch=subarch) + self.make_usable_boot_resource(arch=arch, subarch=subarch) + self.assertEqual( + ["%s/%s" % (arch, subarch)], list_all_usable_architectures()) + + def test_pick_default_architecture_returns_empty_if_no_options(self): + self.assertEqual('', pick_default_architecture([])) + + def test_pick_default_architecture_prefers_i386_generic_if_usable(self): + self.assertEqual( + 'i386/generic', + pick_default_architecture( + ['amd64/generic', 'i386/generic', 'mips/generic'])) + + def test_pick_default_architecture_falls_back_to_first_option(self): + arches = [factory.make_name('arch') for _ in range(5)] + self.assertEqual(arches[0], pick_default_architecture(arches)) + + def test_remove_None_values_removes_None_values_in_dict(self): + random_input = factory.make_string() + self.assertEqual( + {random_input: random_input}, + remove_None_values({ + random_input: random_input, + factory.make_string(): None, + })) + + def test_remove_None_values_leaves_empty_dict_untouched(self): + self.assertEqual({}, remove_None_values({})) + + def test_get_node_edit_form_returns_NodeForm_if_non_admin(self): + user = factory.make_User() + self.assertEqual(NodeForm, get_node_edit_form(user)) + + def test_get_node_edit_form_returns_APIAdminNodeEdit_if_admin(self): + admin = factory.make_admin() + self.assertEqual(AdminNodeForm, get_node_edit_form(admin)) + + def test_get_node_create_form_if_non_admin(self): + user = factory.make_User() + self.assertEqual( + NodeWithMACAddressesForm, get_node_create_form(user)) + + def test_get_node_create_form_if_admin(self): + admin = factory.make_admin() + self.assertEqual( + AdminNodeWithMACAddressesForm, get_node_create_form(admin)) + + +class TestModelForm(MAASServerTestCase): + + def test_model_class_from_UI_has_hidden_field(self): + class TestClass(MAASModelForm): + class Meta: + model = Node + + form = 
TestClass(ui_submission=True) + self.assertIn('ui_submission', form.fields) + self.assertTrue( + form.fields['ui_submission'].widget.is_hidden, + "ui_submission field is not 'hidden'") + + def test_model_class_from_API_doesnt_have_hidden_field(self): + class TestClass(MAASModelForm): + class Meta: + model = Node + + form = TestClass() + self.assertNotIn('ui_submission', form.fields) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_instancelistfield.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_instancelistfield.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_instancelistfield.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_instancelistfield.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,53 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `InstanceListField`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from django.core.exceptions import ValidationError +from maasserver.forms import InstanceListField +from maasserver.models import Node +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase + + +class TestInstanceListField(MAASServerTestCase): + """Tests for `InstanceListField`.""" + + def test_field_validates_valid_data(self): + nodes = [factory.make_Node() for _ in range(3)] + # Create other nodes. + [factory.make_Node() for _ in range(3)] + field = InstanceListField(model_class=Node, field_name='system_id') + input_data = [node.system_id for node in nodes] + self.assertItemsEqual( + input_data, + [node.system_id for node in field.clean(input_data)]) + + def test_field_ignores_duplicates(self): + nodes = [factory.make_Node() for _ in range(2)] + # Create other nodes. 
+ [factory.make_Node() for _ in range(3)] + field = InstanceListField(model_class=Node, field_name='system_id') + input_data = [node.system_id for node in nodes] * 2 + self.assertItemsEqual( + set(input_data), + [node.system_id for node in field.clean(input_data)]) + + def test_field_rejects_invalid_data(self): + nodes = [factory.make_Node() for _ in range(3)] + field = InstanceListField(model_class=Node, field_name='system_id') + error = self.assertRaises( + ValidationError, + field.clean, [node.system_id for node in nodes] + ['unknown']) + self.assertEquals(['Unknown node(s): unknown.'], error.messages) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_licensekey.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_licensekey.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_licensekey.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_licensekey.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,200 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for `LicenseKeyForm`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from operator import itemgetter + +from maasserver import forms +from maasserver.clusterrpc.testing.osystems import ( + make_rpc_osystem, + make_rpc_release, + ) +from maasserver.forms import LicenseKeyForm +from maasserver.models import LicenseKey +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.osystems import patch_usable_osystems +from maasserver.testing.testcase import MAASServerTestCase + + +class TestLicenseKeyForm(MAASServerTestCase): + """Tests for `LicenseKeyForm`.""" + + def make_os_with_license_key(self): + """Makes a fake operating system that has a release that requires a + license key.""" + release = make_rpc_release(requires_license_key=True) + osystem = make_rpc_osystem(releases=[release]) + patch_usable_osystems(self, osystems=[osystem]) + return osystem, release + + def test_creates_license_key(self): + osystem, release = self.make_os_with_license_key() + key = factory.make_name('key') + self.patch_autospec(forms, 'validate_license_key').return_value = True + definition = { + 'osystem': osystem['name'], + 'distro_series': release['name'], + 'license_key': key, + } + data = definition.copy() + data['distro_series'] = '%s/%s' % (osystem['name'], release['name']) + form = LicenseKeyForm(data=data) + form.save() + license_key_obj = LicenseKey.objects.get( + osystem=osystem['name'], distro_series=release['name']) + self.assertAttributes(license_key_obj, definition) + + def test_updates_license_key(self): + osystem, release = self.make_os_with_license_key() + self.patch_autospec(forms, 'validate_license_key').return_value = True + license_key = factory.make_LicenseKey( + osystem=osystem['name'], distro_series=release['name'], + license_key=factory.make_name('key')) + new_key = factory.make_name('key') 
+ form = LicenseKeyForm( + data={'license_key': new_key}, instance=license_key) + form.save() + license_key = reload_object(license_key) + self.assertEqual(new_key, license_key.license_key) + + def test_validates_license_key(self): + osystem, release = self.make_os_with_license_key() + self.patch_autospec(forms, 'validate_license_key').return_value = False + license_key = factory.make_LicenseKey( + osystem=osystem['name'], distro_series=release['name'], + license_key=factory.make_name('key')) + new_key = factory.make_name('key') + form = LicenseKeyForm( + data={'license_key': new_key}, instance=license_key) + self.assertFalse(form.is_valid(), form.errors) + self.assertEqual( + {'__all__': ['Invalid license key.']}, + form.errors) + + def test_handles_missing_osystem_in_distro_series(self): + osystem, release = self.make_os_with_license_key() + self.patch_autospec(forms, 'validate_license_key').return_value = True + key = factory.make_name('key') + definition = { + 'osystem': osystem['name'], + 'distro_series': release['name'], + 'license_key': key, + } + form = LicenseKeyForm(data=definition.copy()) + form.save() + license_key_obj = LicenseKey.objects.get( + osystem=osystem['name'], distro_series=release['name']) + self.assertAttributes(license_key_obj, definition) + + def test_requires_all_fields(self): + form = LicenseKeyForm(data={}) + self.assertFalse(form.is_valid(), form.errors) + self.assertItemsEqual( + ['osystem', 'distro_series', 'license_key'], + form.errors.keys()) + + def test_errors_on_not_unique(self): + osystem, release = self.make_os_with_license_key() + self.patch_autospec(forms, 'validate_license_key').return_value = True + key = factory.make_name('key') + factory.make_LicenseKey( + osystem=osystem['name'], distro_series=release['name'], + license_key=key) + definition = { + 'osystem': osystem['name'], + 'distro_series': release['name'], + 'license_key': key, + } + form = LicenseKeyForm(data=definition) + self.assertFalse(form.is_valid(), 
form.errors) + self.assertEqual({ + '__all__': ['%s %s' % ( + "License key with this operating system and distro series", + "already exists.")]}, + form.errors) + + def test_doesnt_include_default_osystem(self): + form = LicenseKeyForm() + self.assertNotIn(('', 'Default OS'), form.fields['osystem'].choices) + + def test_includes_osystem_in_choices(self): + osystems = [] + for _ in range(3): + release = make_rpc_release(requires_license_key=True) + osystems.append(make_rpc_osystem(releases=[release])) + patch_usable_osystems(self, osystems=osystems) + choices = [ + (osystem['name'], osystem['title']) + for osystem in osystems + ] + form = LicenseKeyForm() + self.assertItemsEqual(choices, form.fields['osystem'].choices) + + def test_includes_all_osystems_sorted(self): + osystems = [] + for _ in range(3): + release = make_rpc_release(requires_license_key=True) + osystems.append(make_rpc_osystem(releases=[release])) + patch_usable_osystems(self, osystems=osystems) + choices = [ + (osystem['name'], osystem['title']) + for osystem in sorted(osystems, key=itemgetter('title')) + ] + form = LicenseKeyForm() + self.assertEqual(choices, form.fields['osystem'].choices) + + def test_includes_only_osystems_that_require_license_keys(self): + osystems = [] + for _ in range(2): + release = make_rpc_release(requires_license_key=True) + osystems.append(make_rpc_osystem(releases=[release])) + patch_usable_osystems(self, osystems=osystems + [make_rpc_osystem()]) + choices = [ + (osystem['name'], osystem['title']) + for osystem in sorted(osystems, key=itemgetter('title')) + ] + form = LicenseKeyForm() + self.assertEquals(choices, form.fields['osystem'].choices) + + def test_doesnt_include_default_distro_series(self): + form = LicenseKeyForm() + self.assertNotIn( + ('', 'Default OS Release'), form.fields['distro_series'].choices) + + def test_includes_all_distro_series(self): + releases = [ + make_rpc_release(requires_license_key=True) for _ in range(3)] + osystem = 
make_rpc_osystem(releases=releases) + patch_usable_osystems(self, osystems=[osystem]) + choices = [ + ('%s/%s' % (osystem['name'], release['name']), release['title']) + for release in releases + ] + form = LicenseKeyForm() + self.assertItemsEqual(choices, form.fields['distro_series'].choices) + + def test_includes_only_distro_series_that_require_license_keys(self): + releases = [ + make_rpc_release(requires_license_key=True) for _ in range(3)] + no_key_release = make_rpc_release() + osystem = make_rpc_osystem(releases=releases + [no_key_release]) + patch_usable_osystems(self, osystems=[osystem]) + choices = [ + ('%s/%s' % (osystem['name'], release['name']), release['title']) + for release in releases + ] + form = LicenseKeyForm() + self.assertItemsEqual(choices, form.fields['distro_series'].choices) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_macaddress.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_macaddress.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_macaddress.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_macaddress.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,58 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for `MACAddressForm`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maasserver.forms import MACAddressForm +from maasserver.models import MACAddress +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase + + +class TestMACAddressForm(MAASServerTestCase): + + def test_MACAddressForm_creates_mac_address(self): + node = factory.make_Node() + mac = factory.make_mac_address() + form = MACAddressForm(node=node, data={'mac_address': mac}) + form.save() + self.assertTrue( + MACAddress.objects.filter(node=node, mac_address=mac).exists()) + + def test_saves_to_db_by_default(self): + node = factory.make_Node() + mac = factory.make_mac_address() + form = MACAddressForm(node=node, data={'mac_address': mac}) + form.save() + self.assertEqual( + mac, MACAddress.objects.get(mac_address=mac).mac_address) + + def test_does_not_save_to_db_if_commit_is_False(self): + node = factory.make_Node() + mac = factory.make_mac_address() + form = MACAddressForm(node=node, data={'mac_address': mac}) + form.save(commit=False) + self.assertItemsEqual([], MACAddress.objects.filter(mac_address=mac)) + + def test_MACAddressForm_displays_error_message_if_mac_already_used(self): + mac = factory.make_mac_address() + node = factory.make_MACAddress_with_Node(address=mac) + node = factory.make_Node() + form = MACAddressForm(node=node, data={'mac_address': mac}) + self.assertFalse(form.is_valid()) + self.assertEquals( + {'mac_address': ['This MAC address is already registered.']}, + form._errors) + self.assertFalse( + MACAddress.objects.filter(node=node, mac_address=mac).exists()) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_merge_error_messages.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_merge_error_messages.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_merge_error_messages.py 1970-01-01 
00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_merge_error_messages.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,54 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `merge_error_messages`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maasserver.forms import ( + MAX_MESSAGES, + merge_error_messages, + ) +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase + + +class TestMergeErrorMessages(MAASServerTestCase): + + def test_merge_error_messages_returns_summary_message(self): + summary = factory.make_name('summary') + errors = [factory.make_name('error') for _ in range(2)] + result = merge_error_messages(summary, errors, 5) + self.assertEqual( + "%s (%s)" % (summary, ' \u2014 '.join(errors)), result) + + def test_merge_error_messages_includes_limited_number_of_msgs(self): + summary = factory.make_name('summary') + errors = [ + factory.make_name('error') + for _ in range(MAX_MESSAGES + 2)] + result = merge_error_messages(summary, errors) + self.assertEqual( + "%s (%s and 2 more errors)" % ( + summary, ' \u2014 '.join(errors[:MAX_MESSAGES])), + result) + + def test_merge_error_messages_with_one_more_error(self): + summary = factory.make_name('summary') + errors = [ + factory.make_name('error') + for _ in range(MAX_MESSAGES + 1)] + result = merge_error_messages(summary, errors) + self.assertEqual( + "%s (%s and 1 more error)" % ( + summary, ' \u2014 '.join(errors[:MAX_MESSAGES])), + result) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_multiplechoicefield.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_multiplechoicefield.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_multiplechoicefield.py 1970-01-01 00:00:00.000000000 +0000 +++ 
maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_multiplechoicefield.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,45 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for multiple-choice fields.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from django.core.exceptions import ValidationError +from django.core.validators import validate_email +from maasserver.forms import ( + UnconstrainedMultipleChoiceField, + ValidatorMultipleChoiceField, + ) +from maasserver.testing.testcase import MAASServerTestCase + + +class TestUnconstrainedMultipleChoiceField(MAASServerTestCase): + + def test_accepts_list(self): + value = ['a', 'b'] + instance = UnconstrainedMultipleChoiceField() + self.assertEqual(value, instance.clean(value)) + + +class TestValidatorMultipleChoiceField(MAASServerTestCase): + + def test_field_validates_valid_data(self): + value = ['test@example.com', 'me@example.com'] + field = ValidatorMultipleChoiceField(validator=validate_email) + self.assertEqual(value, field.clean(value)) + + def test_field_uses_validator(self): + value = ['test@example.com', 'invalid-email'] + field = ValidatorMultipleChoiceField(validator=validate_email) + error = self.assertRaises(ValidationError, field.clean, value) + self.assertEquals(['Enter a valid email address.'], error.messages) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_network.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_network.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_network.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_network.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,298 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for `NetworkForm`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maasserver import forms as forms_module +from maasserver.dns import config as dns_config_module +from maasserver.enum import NODEGROUPINTERFACE_MANAGEMENT +from maasserver.forms import ( + create_Network_from_NodeGroupInterface, + NetworkForm, + ) +from maasserver.models import ( + MACAddress, + Network, + ) +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.testcase import MAASServerTestCase +from maastesting.matchers import MockCalledOnceWith +from netaddr import IPNetwork +from testtools.matchers import Contains + + +class TestNetworkForm(MAASServerTestCase): + """Tests for `NetworkForm`.""" + + def test_creates_network(self): + network = factory.make_ipv4_network() + name = factory.make_name('network') + definition = { + 'name': name, + 'description': factory.make_string(), + 'ip': "%s" % network.cidr.ip, + 'netmask': "%s" % network.netmask, + 'vlan_tag': factory.make_vlan_tag(), + } + form = NetworkForm(data=definition) + form.save() + network_obj = Network.objects.get(name=name) + self.assertAttributes(network_obj, definition) + + def test_network_creation_fails_if_empty_IP(self): + network = factory.make_ipv4_network() + name = factory.make_name('network') + definition = { + 'name': name, + 'description': factory.make_string(), + 'ip': "", + 'netmask': "%s" % network.netmask, + 'vlan_tag': factory.make_vlan_tag(), + } + form = NetworkForm(data=definition) + self.assertFalse(form.is_valid()) + self.assertEqual( + ["This field is required."], form.errors['ip']) + + def test_network_creation_fails_if_empty_netmask(self): + network = factory.make_ipv4_network() + name = factory.make_name('network') + definition = { + 'name': name, + 'description': factory.make_string(), + 'ip': "%s" % network.cidr.ip, + 
'netmask': "", + 'vlan_tag': factory.make_vlan_tag(), + } + form = NetworkForm(data=definition) + self.assertFalse(form.is_valid()) + self.assertEqual( + ["This field is required."], form.errors['netmask']) + + def test_updates_network(self): + network = factory.make_Network() + new_description = factory.make_string() + form = NetworkForm( + data={'description': new_description}, instance=network) + form.save() + network = reload_object(network) + self.assertEqual(new_description, network.description) + + def test_populates_initial_macaddresses(self): + network = factory.make_Network() + macs = [ + factory.make_MACAddress_with_Node(networks=[network]) + for _ in range(3)] + # Create other MAC addresses. + for _ in range(2): + factory.make_MACAddress_with_Node( + networks=[factory.make_Network()]) + new_description = factory.make_string() + form = NetworkForm( + data={'description': new_description}, instance=network) + self.assertItemsEqual( + [mac.mac_address.get_raw() for mac in macs], + form.initial['mac_addresses']) + + def test_macaddresses_are_sorted(self): + network1, network2 = factory.make_Networks(2) + macs = [ + factory.make_MACAddress_with_Node(networks=[network1]) + for _ in range(3)] + # Create macs connected to the same node. + macs = macs + [ + factory.make_MACAddress(networks=[network1], node=macs[0].node) + for _ in range(3)] + # Create other MAC addresses. + for _ in range(2): + factory.make_MACAddress_with_Node(networks=[network2]) + form = NetworkForm(data={}, instance=network1) + self.assertEqual( + list(MACAddress.objects.all().order_by( + 'node__hostname', 'mac_address')), + list(form.fields['mac_addresses'].queryset)) + + def test_macaddresses_widget_displays_MAC_and_node_hostname(self): + networks = factory.make_Networks(3) + same_network = networks[0] + misc_networks = networks[1:] + for _ in range(3): + factory.make_MACAddress_with_Node(networks=[same_network]) + # Create other MAC addresses. 
+ for network in misc_networks: + factory.make_MACAddress_with_Node(networks=[network]) + form = NetworkForm(data={}, instance=same_network) + self.assertItemsEqual( + [(mac.mac_address, "%s (%s)" % ( + mac.mac_address, mac.node.hostname)) + for mac in MACAddress.objects.all()], + form.fields['mac_addresses'].widget.choices) + + def test_updates_macaddresses(self): + network = factory.make_Network() + # Attach a couple of MAC addresses to the network. + [factory.make_MACAddress_with_Node(networks=[network]) + for _ in range(3)] + new_macs = [ + factory.make_MACAddress_with_Node() + for _ in range(3)] + form = NetworkForm( + data={ + 'mac_addresses': [ + mac.mac_address.get_raw() for mac in new_macs], + }, + instance=network) + form.save() + network = reload_object(network) + self.assertItemsEqual(new_macs, network.macaddress_set.all()) + + def test_deletes_macaddresses_by_default_if_not_specified(self): + network = factory.make_Network() + [factory.make_MACAddress_with_Node(networks=[network]) + for _ in range(3)] + form = NetworkForm( + data={ + 'name': "foo", + }, + instance=network) + form.save() + network = reload_object(network) + self.assertItemsEqual([], network.macaddress_set.all()) + + def test_does_not_delete_unspecified_macaddresses_if_told_not_to(self): + network = factory.make_Network() + macs = [ + factory.make_MACAddress_with_Node(networks=[network]) + for _ in range(3)] + form = NetworkForm( + data={ + 'name': "foo", + }, + instance=network, + delete_macs_if_not_present=False, + ) + form.save() + network = reload_object(network) + self.assertItemsEqual(macs, network.macaddress_set.all()) + + def test_reports_clashes(self): + # The uniqueness test on the Network model raises a ValidationError + # when it finds a clash, but Django is prone to crashing when the + # exception doesn't take the expected form (bug 1299114). 
+ big_network = IPNetwork('10.9.0.0/16') + nested_network = IPNetwork('10.9.9.0/24') + + existing_network = factory.make_Network(network=big_network) + form = NetworkForm(data={ + 'name': factory.make_name('clashing-network'), + 'ip': "%s" % nested_network.cidr.ip, + 'netmask': "%s" % nested_network.netmask, + 'vlan_tag': factory.make_vlan_tag(), + }) + self.assertFalse(form.is_valid()) + message = "IP range clashes with network '%s'." % existing_network.name + self.assertEqual( + { + 'ip': [message], + 'netmask': [message], + }, + form.errors) + + def test_writes_dns_when_network_edited(self): + write_full_dns_config = self.patch( + dns_config_module, "write_full_dns_config") + network = factory.make_ipv4_network() + name = factory.make_name('network') + definition = { + 'name': name, + 'description': factory.make_string(), + 'ip': "%s" % network.cidr.ip, + 'netmask': "%s" % network.netmask, + 'vlan_tag': factory.make_vlan_tag(), + } + form = NetworkForm(data=definition) + form.save() + self.assertThat(write_full_dns_config, MockCalledOnceWith()) + + def test_writes_dns_when_network_deleted(self): + network = factory.make_Network() + write_full_dns_config = self.patch( + dns_config_module, "write_full_dns_config") + network.delete() + self.assertThat(write_full_dns_config, MockCalledOnceWith()) + + +class TestCreateNetworkFromNodeGroupInterface(MAASServerTestCase): + + def test_skips_creation_if_netmask_undefined(self): + nodegroup = factory.make_NodeGroup() + interface = factory.make_NodeGroupInterface( + nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) + interface.subnet_mask = None + network = create_Network_from_NodeGroupInterface(interface) + self.assertIsNone(network) + self.assertItemsEqual([], Network.objects.all()) + + def test_creates_network_without_vlan(self): + nodegroup = factory.make_NodeGroup() + interface = factory.make_NodeGroupInterface(nodegroup) + network = create_Network_from_NodeGroupInterface(interface) + definition = { + 
'name': "%s-%s" % ( + interface.nodegroup.name, interface.interface), + 'description': ( + "Auto created when creating interface %s on cluster %s" % ( + interface.name, interface.nodegroup.name)), + 'ip': "%s" % interface.network.ip, + 'netmask': "%s" % interface.network.netmask, + 'vlan_tag': None, + } + network_obj = Network.objects.get(id=network.id) + self.assertAttributes(network_obj, definition) + + def test_creates_network_with_vlan(self): + nodegroup = factory.make_NodeGroup() + intf = 'eth0' + vlan = 1 + interface = factory.make_NodeGroupInterface( + nodegroup, interface="%s.%d" % (intf, vlan)) + network = create_Network_from_NodeGroupInterface(interface) + net_name = "%s-%s" % (interface.nodegroup.name, interface.interface) + net_name = net_name.replace('.', '-') + definition = { + 'name': net_name, + 'description': ( + "Auto created when creating interface %s on cluster %s" % ( + interface.name, interface.nodegroup.name)), + 'ip': "%s" % interface.network.ip, + 'netmask': "%s" % interface.network.netmask, + 'vlan_tag': vlan, + } + network_obj = Network.objects.get(id=network.id) + self.assertAttributes(network_obj, definition) + + def test_skips_creation_if_network_already_exists(self): + nodegroup = factory.make_NodeGroup() + interface = factory.make_NodeGroupInterface(nodegroup) + create_Network_from_NodeGroupInterface(interface) + maaslog = self.patch(forms_module, 'maaslog') + + self.assertIsNone(create_Network_from_NodeGroupInterface(interface)) + self.assertEqual( + 1, maaslog.warning.call_count, + "maaslog.warning hasn't been called") + self.assertThat( + maaslog.warning.call_args[0][0], + Contains("Failed to create Network")) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_nodeaction.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_nodeaction.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_nodeaction.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_nodeaction.py 2015-07-10 
01:27:14.000000000 +0000 @@ -0,0 +1,117 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `NodeActionForm`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from django.contrib import messages +from maasserver.enum import ( + NODE_BOOT, + NODE_STATUS, + ) +from maasserver.exceptions import NodeActionError +from maasserver.forms import ( + get_action_form, + NodeActionForm, + ) +from maasserver.node_action import ( + Commission, + Delete, + MarkBroken, + StartNode, + UseCurtin, + ) +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase + + +class TestNodeActionForm(MAASServerTestCase): + + def test_get_action_form_creates_form_class_with_attributes(self): + user = factory.make_admin() + form_class = get_action_form(user) + + self.assertEqual(user, form_class.user) + + def test_get_action_form_creates_form_class(self): + user = factory.make_admin() + node = factory.make_Node(status=NODE_STATUS.NEW) + form = get_action_form(user)(node) + + self.assertIsInstance(form, NodeActionForm) + self.assertEqual(node, form.node) + + def test_get_action_form_for_admin(self): + admin = factory.make_admin() + node = factory.make_Node( + status=NODE_STATUS.NEW, boot_type=NODE_BOOT.DEBIAN) + form = get_action_form(admin)(node) + + self.assertItemsEqual( + [Commission.name, Delete.name, UseCurtin.name, MarkBroken.name], + form.actions) + + def test_get_action_form_for_user(self): + user = factory.make_User() + node = factory.make_Node(status=NODE_STATUS.NEW) + form = get_action_form(user)(node) + + self.assertIsInstance(form, NodeActionForm) + self.assertEqual(node, form.node) + self.assertItemsEqual({}, form.actions) + + def test_save_performs_requested_action(self): + admin = factory.make_admin() + node = 
factory.make_Node(status=NODE_STATUS.NEW) + form = get_action_form(admin)( + node, {NodeActionForm.input_name: Commission.name}) + self.assertTrue(form.is_valid()) + form.save() + self.assertEqual(NODE_STATUS.COMMISSIONING, node.status) + + def test_rejects_disallowed_action(self): + user = factory.make_User() + node = factory.make_Node(status=NODE_STATUS.NEW) + form = get_action_form(user)( + node, {NodeActionForm.input_name: Commission.name}) + self.assertFalse(form.is_valid()) + self.assertEquals( + {'action': ['Not a permitted action: %s.' % Commission.name]}, + form._errors) + + def test_rejects_unknown_action(self): + user = factory.make_User() + node = factory.make_Node(status=NODE_STATUS.NEW) + action = factory.make_string() + form = get_action_form(user)( + node, {NodeActionForm.input_name: action}) + self.assertFalse(form.is_valid()) + self.assertIn( + "is not one of the available choices.", form._errors['action'][0]) + + def test_shows_error_message_for_NodeActionError(self): + error_text = factory.make_string(prefix="NodeActionError") + exc = NodeActionError(error_text) + self.patch(StartNode, "execute").side_effect = exc + user = factory.make_User() + node = factory.make_Node( + status=NODE_STATUS.ALLOCATED, owner=user) + action = StartNode.name + # Required for messages to work: + request = factory.make_fake_request("/fake") + form = get_action_form(user, request)( + node, {NodeActionForm.input_name: action}) + form.save() + [observed] = messages.get_messages(form.request) + expected = (messages.ERROR, error_text, '') + self.assertEqual(expected, observed) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_nodegroupinterface.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_nodegroupinterface.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_nodegroupinterface.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_nodegroupinterface.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,431 @@ +# 
Copyright 2012-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Test forms.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import random + +from maasserver.enum import ( + NODEGROUP_STATUS, + NODEGROUPINTERFACE_MANAGEMENT, + ) +from maasserver.forms import ( + ERROR_MESSAGE_DYNAMIC_RANGE_SPANS_SLASH_16S, + ERROR_MESSAGE_STATIC_RANGE_IN_USE, + NodeGroupInterfaceForm, + ) +from maasserver.models import ( + Network, + NodeGroupInterface, + ) +from maasserver.models.staticipaddress import StaticIPAddress +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase +from maasserver.utils.interfaces import ( + get_name_and_vlan_from_cluster_interface, + ) +from maastesting.matchers import MockCalledOnceWith +from netaddr import ( + IPAddress, + IPNetwork, + ) +from testtools.matchers import ( + AllMatch, + Equals, + MatchesStructure, + StartsWith, + ) + + +nullable_fields = [ + 'subnet_mask', 'broadcast_ip', 'router_ip', 'ip_range_low', + 'ip_range_high', 'static_ip_range_low', 'static_ip_range_high', + ] + + +def make_ngi_instance(nodegroup=None): + """Create a `NodeGroupInterface` with nothing set but `nodegroup`. + + This is used by tests to instantiate the cluster interface form for + a given cluster. We create an initial cluster interface object just + to tell it which cluster that is. 
+ """ + if nodegroup is None: + nodegroup = factory.make_NodeGroup() + return NodeGroupInterface(nodegroup=nodegroup) + + +class TestNodeGroupInterfaceForm(MAASServerTestCase): + + def test__validates_parameters(self): + form = NodeGroupInterfaceForm( + data={'ip': factory.make_string()}, + instance=make_ngi_instance()) + self.assertFalse(form.is_valid()) + self.assertEquals( + {'ip': ['Enter a valid IPv4 or IPv6 address.']}, form._errors) + + def test__can_save_fields_being_None(self): + int_settings = factory.get_interface_fields() + int_settings['management'] = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED + for field_name in nullable_fields: + del int_settings[field_name] + form = NodeGroupInterfaceForm( + data=int_settings, instance=make_ngi_instance()) + interface = form.save() + field_values = [ + getattr(interface, field_name) for field_name in nullable_fields] + self.assertThat(field_values, AllMatch(Equals(''))) + + def test__uses_name_if_given(self): + name = factory.make_name('explicit-name') + int_settings = factory.get_interface_fields() + int_settings['name'] = name + form = NodeGroupInterfaceForm( + data=int_settings, instance=make_ngi_instance()) + interface = form.save() + self.assertEqual(name, interface.name) + + def test__lets_name_default_to_network_interface_name(self): + int_settings = factory.get_interface_fields() + int_settings['interface'] = factory.make_name('ether') + del int_settings['name'] + form = NodeGroupInterfaceForm( + data=int_settings, instance=make_ngi_instance()) + interface = form.save() + self.assertEqual(int_settings['interface'], interface.name) + + def test__escapes_interface_name(self): + int_settings = factory.get_interface_fields() + int_settings['interface'] = 'eth1+1' + del int_settings['name'] + form = NodeGroupInterfaceForm( + data=int_settings, instance=make_ngi_instance()) + interface = form.save() + self.assertEqual('eth1--1', interface.name) + + def 
test__defaults_to_unique_name_if_no_name_or_interface_given(self): + int_settings = factory.get_interface_fields( + management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) + del int_settings['name'] + del int_settings['interface'] + form1 = NodeGroupInterfaceForm( + data=int_settings, instance=make_ngi_instance()) + interface1 = form1.save() + form2 = NodeGroupInterfaceForm( + data=int_settings, instance=make_ngi_instance()) + interface2 = form2.save() + self.assertNotIn(interface1.name, [None, '']) + self.assertNotIn(interface2.name, [None, '']) + self.assertNotEqual(interface1.name, interface2.name) + + def test__disambiguates_default_name(self): + cluster = factory.make_NodeGroup() + existing_interface = factory.make_NodeGroupInterface(cluster) + int_settings = factory.get_interface_fields() + del int_settings['name'] + int_settings['interface'] = existing_interface.name + form = NodeGroupInterfaceForm( + data=int_settings, instance=make_ngi_instance(cluster)) + interface = form.save() + self.assertThat(interface.name, StartsWith(int_settings['interface'])) + self.assertNotEqual(int_settings['interface'], interface.name) + + def test__disambiguates_IPv4_interface_with_ipv4_suffix(self): + cluster = factory.make_NodeGroup() + existing_interface = factory.make_NodeGroupInterface( + cluster, network=factory.make_ipv4_network()) + int_settings = factory.get_interface_fields() + del int_settings['name'] + int_settings['interface'] = existing_interface.name + form = NodeGroupInterfaceForm( + data=int_settings, instance=make_ngi_instance(cluster)) + interface = form.save() + self.assertEqual('%s-ipv4' % int_settings['interface'], interface.name) + + def test__disambiguates_IPv6_interface_with_ipv6_suffix(self): + cluster = factory.make_NodeGroup() + existing_interface = factory.make_NodeGroupInterface(cluster) + int_settings = factory.get_interface_fields( + network=factory.make_ipv6_network(slash=64)) + del int_settings['name'] + int_settings['interface'] = 
existing_interface.name + form = NodeGroupInterfaceForm( + data=int_settings, instance=make_ngi_instance(cluster)) + interface = form.save() + self.assertThat( + interface.name, + StartsWith('%s-ipv6-' % int_settings['interface'])) + + def test__requires_netmask_on_managed_IPv4_interface(self): + network = factory.make_ipv4_network() + int_settings = factory.get_interface_fields( + network=network, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + del int_settings['subnet_mask'] + form = NodeGroupInterfaceForm( + data=int_settings, instance=make_ngi_instance()) + self.assertFalse(form.is_valid()) + + def test__lets_netmask_default_to_64_bits_on_IPv6(self): + network = factory.make_ipv6_network() + int_settings = factory.get_interface_fields( + network=network, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + del int_settings['subnet_mask'] + form = NodeGroupInterfaceForm( + data=int_settings, instance=make_ngi_instance()) + self.assertTrue(form.is_valid()) + interface = form.save() + self.assertEqual( + IPAddress('ffff:ffff:ffff:ffff::'), + IPAddress(interface.subnet_mask)) + + def test__accepts_netmasks_other_than_64_bits_on_IPv6(self): + netmask = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffc' + network = factory.make_ipv6_network(slash=netmask) + int_settings = factory.get_interface_fields( + network=network, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, + netmask=netmask) + form = NodeGroupInterfaceForm( + data=int_settings, instance=make_ngi_instance()) + self.assertTrue(form.is_valid()) + interface = form.save() + self.assertEqual( + IPAddress(netmask), + IPAddress(interface.subnet_mask)) + + def test_validates_new_static_ip_ranges(self): + network = IPNetwork("10.1.0.0/24") + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, + network=network) + [interface] = nodegroup.get_managed_interfaces() + StaticIPAddress.objects.allocate_new( + interface.static_ip_range_low, 
interface.static_ip_range_high) + form = NodeGroupInterfaceForm( + data={'static_ip_range_low': '', 'static_ip_range_high': ''}, + instance=interface) + self.assertFalse(form.is_valid()) + self.assertEqual( + [ERROR_MESSAGE_STATIC_RANGE_IN_USE], + form._errors['static_ip_range_low']) + self.assertEqual( + [ERROR_MESSAGE_STATIC_RANGE_IN_USE], + form._errors['static_ip_range_high']) + + def test_rejects_ipv4_dynamic_ranges_across_multiple_slash_16s(self): + # Even if a dynamic range is < 65536 addresses, it can't cross + # two /16 networks. + network = IPNetwork("10.1.0.0/8") + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, + network=network, static_ip_range_low=None, + static_ip_range_high=None) + [interface] = nodegroup.get_managed_interfaces() + form = NodeGroupInterfaceForm( + data={ + 'ip_range_low': '10.1.255.255', + 'ip_range_high': '10.2.0.1', + }, + instance=interface) + self.assertFalse(form.is_valid()) + self.assertEqual( + [ERROR_MESSAGE_DYNAMIC_RANGE_SPANS_SLASH_16S], + form._errors['ip_range_low']) + self.assertEqual( + [ERROR_MESSAGE_DYNAMIC_RANGE_SPANS_SLASH_16S], + form._errors['ip_range_low']) + + def test_allows_sane_ipv4_dynamic_range_size(self): + network = IPNetwork("10.1.0.0/8") + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, + network=network, static_ip_range_low=None, + static_ip_range_high=None) + [interface] = nodegroup.get_managed_interfaces() + form = NodeGroupInterfaceForm( + data={ + 'ip_range_low': '10.0.0.1', + 'ip_range_high': '10.0.1.255', + }, + instance=interface) + self.assertTrue(form.is_valid()) + + def test_allows_any_size_ipv6_dynamic_range(self): + network = factory.make_ipv6_network(slash=64) + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, + network=network) + [interface] = 
nodegroup.get_managed_interfaces() + form = NodeGroupInterfaceForm( + data={ + 'ip_range_low': IPAddress(network.first).format(), + 'ip_range_high': IPAddress(network.last).format(), + 'static_ip_range_low': '', + 'static_ip_range_high': '', + }, + instance=interface) + self.assertTrue(form.is_valid()) + + def test_calls_get_duplicate_fqdns_when_appropriate(self): + # Check for duplicate FQDNs if the NodeGroupInterface has a + # NodeGroup and is managing DNS. + int_settings = factory.get_interface_fields( + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) + form = NodeGroupInterfaceForm( + data=int_settings, instance=make_ngi_instance()) + mock = self.patch(form, "get_duplicate_fqdns") + self.assertTrue(form.is_valid(), form.errors) + self.assertThat(mock, MockCalledOnceWith()) + + def test_reports_error_if_fqdns_duplicated(self): + int_settings = factory.get_interface_fields( + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) + form = NodeGroupInterfaceForm( + data=int_settings, instance=make_ngi_instance()) + mock = self.patch(form, "get_duplicate_fqdns") + hostnames = [ + factory.make_hostname("duplicate") for _ in range(0, 3)] + mock.return_value = hostnames + self.assertFalse(form.is_valid()) + message = "Enabling DNS management creates duplicate FQDN(s): %s." % ( + ", ".join(set(hostnames))) + self.assertEqual( + {'management': [message]}, + form.errors) + + def test_identifies_duplicate_fqdns_in_nodegroup(self): + # Don't allow DNS management to be enabled when it would + # cause more than one node on the nodegroup to have the + # same FQDN. 
+ nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + base_hostname = factory.make_hostname("host") + full_hostnames = [ + "%s.%s" % (base_hostname, factory.make_hostname("domain")) + for _ in range(0, 2)] + for hostname in full_hostnames: + factory.make_Node(hostname=hostname, nodegroup=nodegroup) + [interface] = nodegroup.get_managed_interfaces() + data = {"management": NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS} + form = NodeGroupInterfaceForm(data=data, instance=interface) + duplicates = form.get_duplicate_fqdns() + expected_duplicates = set(["%s.%s" % (base_hostname, nodegroup.name)]) + self.assertEqual(expected_duplicates, duplicates) + + def test_identifies_duplicate_fqdns_across_nodegroups(self): + # Don't allow DNS management to be enabled when it would + # cause a node in this nodegroup to have the same FQDN + # as a node in another nodegroup. + + conflicting_domain = factory.make_hostname("conflicting-domain") + nodegroup_a = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, + name=conflicting_domain) + conflicting_hostname = factory.make_hostname("conflicting-hostname") + factory.make_Node( + hostname="%s.%s" % (conflicting_hostname, conflicting_domain), + nodegroup=nodegroup_a) + + nodegroup_b = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, + name=conflicting_domain) + factory.make_Node( + hostname="%s.%s" % ( + conflicting_hostname, factory.make_hostname("other-domain")), + nodegroup=nodegroup_b) + + [interface] = nodegroup_b.get_managed_interfaces() + data = {"management": NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS} + form = NodeGroupInterfaceForm(data=data, instance=interface) + duplicates = form.get_duplicate_fqdns() + expected_duplicates = set( + ["%s.%s" % (conflicting_hostname, conflicting_domain)]) + self.assertEqual(expected_duplicates, duplicates) + + 
+class TestNodeGroupInterfaceFormNetworkCreation(MAASServerTestCase): + """Tests for when NodeGroupInterfaceForm creates a Network.""" + + def test_creates_network_name(self): + int_settings = factory.get_interface_fields() + int_settings['interface'] = 'eth0:1' + interface = make_ngi_instance() + form = NodeGroupInterfaceForm(data=int_settings, instance=interface) + form.save() + [network] = Network.objects.all() + expected, _ = get_name_and_vlan_from_cluster_interface( + interface.nodegroup.name, interface.interface) + self.assertEqual(expected, network.name) + + def test_sets_vlan_tag(self): + int_settings = factory.get_interface_fields() + vlan_tag = random.randint(1, 10) + int_settings['interface'] = 'eth0.%s' % vlan_tag + interface = make_ngi_instance() + form = NodeGroupInterfaceForm(data=int_settings, instance=interface) + form.save() + [network] = Network.objects.all() + self.assertEqual(vlan_tag, network.vlan_tag) + + def test_vlan_tag_is_None_if_no_vlan(self): + int_settings = factory.get_interface_fields() + int_settings['interface'] = 'eth0:1' + interface = make_ngi_instance() + form = NodeGroupInterfaceForm(data=int_settings, instance=interface) + form.save() + [network] = Network.objects.all() + self.assertIs(None, network.vlan_tag) + + def test_sets_network_values(self): + int_settings = factory.get_interface_fields() + interface = make_ngi_instance() + form = NodeGroupInterfaceForm(data=int_settings, instance=interface) + form.save() + [network] = Network.objects.all() + expected_net_address = unicode(interface.network.network) + expected_netmask = unicode(interface.network.netmask) + self.assertThat( + network, MatchesStructure.byEquality( + ip=expected_net_address, + netmask=expected_netmask)) + + def test_does_not_create_new_network_if_already_exists(self): + int_settings = factory.get_interface_fields() + interface = make_ngi_instance() + form = NodeGroupInterfaceForm(data=int_settings, instance=interface) + # The easiest way to pre-create the 
same network is just to save + # the form twice. + form.save() + [existing_network] = Network.objects.all() + form.save() + self.assertItemsEqual([existing_network], Network.objects.all()) + + def test_creates_many_unique_networks(self): + names = ('eth0', 'eth0:1', 'eth0.1', 'eth0:1.2') + for name in names: + int_settings = factory.get_interface_fields() + int_settings['interface'] = name + interface = make_ngi_instance() + form = NodeGroupInterfaceForm( + data=int_settings, instance=interface) + form.save() + + self.assertEqual(len(names), len(Network.objects.all())) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_nodegroup.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_nodegroup.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_nodegroup.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_nodegroup.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,419 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for nodegroup forms.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import json +from random import randint + +from django.forms import ( + CheckboxInput, + HiddenInput, + ) +from maasserver.enum import ( + NODE_STATUS, + NODEGROUP_STATUS, + NODEGROUPINTERFACE_MANAGEMENT, + ) +from maasserver.forms import ( + INTERFACES_VALIDATION_ERROR_MESSAGE, + NodeGroupDefineForm, + NodeGroupEdit, + ) +from maasserver.models import ( + NodeGroup, + NodeGroupInterface, + ) +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.testcase import MAASServerTestCase +from netaddr import IPNetwork +from provisioningserver.utils.enum import map_enum +from testtools.matchers import ( + HasLength, + MatchesStructure, + StartsWith, + ) + + +class TestNodeGroupDefineForm(MAASServerTestCase): + + def test_creates_pending_nodegroup(self): + name = factory.make_name('name') + uuid = factory.make_UUID() + form = NodeGroupDefineForm(data={'name': name, 'uuid': uuid}) + self.assertTrue(form.is_valid(), form._errors) + nodegroup = form.save() + self.assertEqual( + (uuid, name, NODEGROUP_STATUS.PENDING, 0), + ( + nodegroup.uuid, + nodegroup.name, + nodegroup.status, + nodegroup.nodegroupinterface_set.count(), + )) + + def test_creates_nodegroup_with_status(self): + name = factory.make_name('name') + uuid = factory.make_UUID() + form = NodeGroupDefineForm( + status=NODEGROUP_STATUS.ACCEPTED, + data={'name': name, 'uuid': uuid}) + self.assertTrue(form.is_valid(), form._errors) + nodegroup = form.save() + self.assertEqual(NODEGROUP_STATUS.ACCEPTED, nodegroup.status) + + def test_validates_parameters(self): + name = factory.make_name('name') + too_long_uuid = 'test' * 30 + form = NodeGroupDefineForm( + data={'name': name, 'uuid': too_long_uuid}) + self.assertFalse(form.is_valid()) + self.assertEquals( + {'uuid': + ['Ensure 
this value has at most 36 characters (it has 120).']}, + form._errors) + + def test_rejects_invalid_json_interfaces(self): + name = factory.make_name('name') + uuid = factory.make_UUID() + invalid_interfaces = factory.make_name('invalid_json_interfaces') + form = NodeGroupDefineForm( + data={ + 'name': name, 'uuid': uuid, 'interfaces': invalid_interfaces}) + self.assertFalse(form.is_valid()) + self.assertEquals( + {'interfaces': ['Invalid json value.']}, + form._errors) + + def test_rejects_invalid_list_interfaces(self): + name = factory.make_name('name') + uuid = factory.make_UUID() + invalid_interfaces = json.dumps('invalid interface list') + form = NodeGroupDefineForm( + data={ + 'name': name, 'uuid': uuid, 'interfaces': invalid_interfaces}) + self.assertFalse(form.is_valid()) + self.assertEquals( + {'interfaces': [INTERFACES_VALIDATION_ERROR_MESSAGE]}, + form._errors) + + def test_rejects_invalid_interface(self): + name = factory.make_name('name') + uuid = factory.make_UUID() + interface = factory.get_interface_fields() + # Make the interface invalid. + interface['ip_range_high'] = 'invalid IP address' + interfaces = json.dumps([interface]) + form = NodeGroupDefineForm( + data={'name': name, 'uuid': uuid, 'interfaces': interfaces}) + self.assertFalse(form.is_valid()) + self.assertIn( + "Enter a valid IPv4 or IPv6 address", + form._errors['interfaces'][0]) + + def test_creates_interface_from_params(self): + name = factory.make_name('name') + uuid = factory.make_UUID() + interface = factory.get_interface_fields() + interfaces = json.dumps([interface]) + form = NodeGroupDefineForm( + data={'name': name, 'uuid': uuid, 'interfaces': interfaces}) + self.assertTrue(form.is_valid(), form._errors) + form.save() + nodegroup = NodeGroup.objects.get(uuid=uuid) + # Replace empty strings with None as empty strings are converted into + # None for fields with null=True. 
+ expected_result = { + key: (value if value != '' else None) + for key, value in interface.items() + } + self.assertThat( + nodegroup.nodegroupinterface_set.all()[0], + MatchesStructure.byEquality(**expected_result)) + + def test_accepts_unnamed_cluster_interface(self): + uuid = factory.make_UUID() + interface = factory.get_interface_fields() + del interface['name'] + interfaces = json.dumps([interface]) + form = NodeGroupDefineForm( + data={ + 'name': factory.make_name('cluster'), + 'uuid': uuid, + 'interfaces': interfaces, + }) + self.assertTrue(form.is_valid(), form._errors) + cluster = form.save() + [cluster_interface] = cluster.nodegroupinterface_set.all() + self.assertEqual(interface['interface'], cluster_interface.name) + self.assertEqual(interface['interface'], cluster_interface.interface) + + def test_checks_against_conflicting_managed_networks(self): + big_network = IPNetwork('10.0.0.0/255.255.0.0') + nested_network = IPNetwork('10.0.100.0/255.255.255.0') + managed = NODEGROUPINTERFACE_MANAGEMENT.DHCP + form = NodeGroupDefineForm( + data={ + 'name': factory.make_name('cluster'), + 'uuid': factory.make_UUID(), + 'interfaces': json.dumps([ + factory.get_interface_fields( + network=big_network, management=managed), + factory.get_interface_fields( + network=nested_network, management=managed), + ]), + }) + self.assertFalse(form.is_valid()) + self.assertNotEqual([], form._errors['interfaces']) + self.assertThat( + form._errors['interfaces'][0], + StartsWith("Conflicting networks")) + + def test_ignores_conflicts_on_unmanaged_interfaces(self): + big_network = IPNetwork('10.0.0.0/255.255.0.0') + nested_network = IPNetwork('10.100.100.0/255.255.255.0') + managed = NODEGROUPINTERFACE_MANAGEMENT.DHCP + unmanaged = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED + form = NodeGroupDefineForm( + data={ + 'name': factory.make_name('cluster'), + 'uuid': factory.make_UUID(), + 'interfaces': json.dumps([ + factory.get_interface_fields( + network=big_network, management=managed), 
+ factory.get_interface_fields( + network=nested_network, management=unmanaged), + ]), + }) + is_valid = form.is_valid() + self.assertEqual( + (True, None), + (is_valid, form._errors.get('interfaces'))) + + def test_creates_multiple_interfaces(self): + name = factory.make_name('name') + uuid = factory.make_UUID() + interfaces = [ + factory.get_interface_fields(management=management) + for management in map_enum(NODEGROUPINTERFACE_MANAGEMENT).values() + ] + form = NodeGroupDefineForm( + data={ + 'name': name, + 'uuid': uuid, + 'interfaces': json.dumps(interfaces), + }) + self.assertTrue(form.is_valid(), form._errors) + form.save() + nodegroup = NodeGroup.objects.get(uuid=uuid) + self.assertEqual( + len(interfaces), nodegroup.nodegroupinterface_set.count()) + + def test_populates_cluster_name_default(self): + name = factory.make_name('name') + uuid = factory.make_UUID() + form = NodeGroupDefineForm( + status=NODEGROUP_STATUS.ACCEPTED, + data={'name': name, 'uuid': uuid}) + self.assertTrue(form.is_valid(), form._errors) + nodegroup = form.save() + self.assertIn(uuid, nodegroup.cluster_name) + + def test_populates_cluster_name(self): + cluster_name = factory.make_name('cluster_name') + uuid = factory.make_UUID() + form = NodeGroupDefineForm( + status=NODEGROUP_STATUS.ACCEPTED, + data={'cluster_name': cluster_name, 'uuid': uuid}) + self.assertTrue(form.is_valid(), form._errors) + nodegroup = form.save() + self.assertEqual(cluster_name, nodegroup.cluster_name) + + def test_creates_unmanaged_interfaces(self): + name = factory.make_name('name') + uuid = factory.make_UUID() + interface = factory.get_interface_fields() + del interface['management'] + interfaces = json.dumps([interface]) + form = NodeGroupDefineForm( + data={'name': name, 'uuid': uuid, 'interfaces': interfaces}) + self.assertTrue(form.is_valid(), form._errors) + form.save() + uuid_nodegroup = NodeGroup.objects.get(uuid=uuid) + self.assertEqual( + [NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED], + [ + 
nodegroup.management for nodegroup in + uuid_nodegroup.nodegroupinterface_set.all() + ]) + + def test_gives_disambiguation_preference_to_IPv4(self): + network_interface = factory.make_name('eth', sep='') + ipv4_network = factory.make_ipv4_network() + # We'll be creating a cluster with two interfaces, both using the same + # network interface: an IPv4 one and an IPv6 one. + # We randomise the ordering of this list to rule out special treatment + # based on definition order. + interfaces = sorted( + [ + factory.get_interface_fields( + network=factory.make_ipv6_network(slash=64), + interface=network_interface), + factory.get_interface_fields( + network=ipv4_network, interface=network_interface), + ], + cmp=lambda left, right: randint(-1, 1)) + # We're not going to pass names for these cluster interfaces, so the + # form will have to make some up based on the network interface name. + for definition in interfaces: + del definition['name'] + form = NodeGroupDefineForm( + data={ + 'name': factory.make_name('cluster'), + 'uuid': factory.make_UUID(), + 'interfaces': json.dumps(interfaces), + }) + self.assertTrue(form.is_valid(), form._errors) + cluster = form.save() + # All of the cluster interfaces' names are unique and based on the + # network interface name, but the IPv4 one gets the unadorned name. 
+ interfaces_by_name = { + interface.name: interface + for interface in cluster.nodegroupinterface_set.all() + } + self.expectThat(interfaces_by_name, HasLength(len(interfaces))) + self.assertIn(network_interface, interfaces_by_name) + self.assertEqual( + ipv4_network, + interfaces_by_name[network_interface].network) + + +class TestNodeGroupEdit(MAASServerTestCase): + + def make_form_data(self, nodegroup): + """Create `NodeGroupEdit` form data based on `nodegroup`.""" + return { + 'name': nodegroup.name, + 'cluster_name': nodegroup.cluster_name, + 'status': nodegroup.status, + } + + def test_changes_name(self): + nodegroup = factory.make_NodeGroup(name=factory.make_name('old-name')) + new_name = factory.make_name('new-name') + data = self.make_form_data(nodegroup) + data['name'] = new_name + form = NodeGroupEdit(instance=nodegroup, data=data) + self.assertTrue(form.is_valid()) + form.save() + self.assertEqual(new_name, reload_object(nodegroup).name) + + def test_refuses_name_change_if_dns_managed_and_nodes_in_use(self): + nodegroup, node = factory.make_unrenamable_NodeGroup_with_Node() + data = self.make_form_data(nodegroup) + data['name'] = factory.make_name('new-name') + form = NodeGroupEdit(instance=nodegroup, data=data) + self.assertFalse(form.is_valid()) + + def test_accepts_unchanged_name(self): + nodegroup, node = factory.make_unrenamable_NodeGroup_with_Node() + original_name = nodegroup.name + form = NodeGroupEdit( + instance=nodegroup, data=self.make_form_data(nodegroup)) + self.assertTrue(form.is_valid()) + form.save() + self.assertEqual(original_name, reload_object(nodegroup).name) + + def test_accepts_omitted_name(self): + nodegroup, node = factory.make_unrenamable_NodeGroup_with_Node() + original_name = nodegroup.name + data = self.make_form_data(nodegroup) + del data['name'] + form = NodeGroupEdit(instance=nodegroup, data=data) + self.assertTrue(form.is_valid()) + form.save() + self.assertEqual(original_name, reload_object(nodegroup).name) + + def 
test_accepts_name_change_if_nodegroup_not_accepted(self): + nodegroup, node = factory.make_unrenamable_NodeGroup_with_Node() + nodegroup.status = NODEGROUP_STATUS.PENDING + data = self.make_form_data(nodegroup) + data['name'] = factory.make_name('new-name') + form = NodeGroupEdit(instance=nodegroup, data=data) + self.assertTrue(form.is_valid()) + + def test_accepts_name_change_if_dns_managed_but_no_nodes_in_use(self): + nodegroup, node = factory.make_unrenamable_NodeGroup_with_Node() + node.status = NODE_STATUS.READY + node.save() + data = self.make_form_data(nodegroup) + data['name'] = factory.make_name('new-name') + form = NodeGroupEdit(instance=nodegroup, data=data) + self.assertTrue(form.is_valid()) + form.save() + self.assertEqual(data['name'], reload_object(nodegroup).name) + + def test_accepts_name_change_if_nodes_in_use_but_dns_not_managed(self): + nodegroup, node = factory.make_unrenamable_NodeGroup_with_Node() + [interface] = nodegroup.get_managed_interfaces() + interface.management = NODEGROUPINTERFACE_MANAGEMENT.DHCP + interface.save() + data = self.make_form_data(nodegroup) + data['name'] = factory.make_name('new-name') + form = NodeGroupEdit(instance=nodegroup, data=data) + self.assertTrue(form.is_valid()) + form.save() + self.assertEqual(data['name'], reload_object(nodegroup).name) + + def test_accepts_name_change_if_nodegroup_has_no_interface(self): + nodegroup, node = factory.make_unrenamable_NodeGroup_with_Node() + NodeGroupInterface.objects.filter(nodegroup=nodegroup).delete() + data = self.make_form_data(nodegroup) + data['name'] = factory.make_name('new-name') + form = NodeGroupEdit(instance=nodegroup, data=data) + self.assertTrue(form.is_valid()) + form.save() + self.assertEqual(data['name'], reload_object(nodegroup).name) + + def test_shows_default_disable_ipv4_if_managed_ipv6_configured(self): + nodegroup = factory.make_NodeGroup() + factory.make_NodeGroupInterface( + nodegroup, network=factory.make_ipv6_network(), + 
management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + form = NodeGroupEdit(instance=nodegroup) + self.assertIsInstance( + form.fields['default_disable_ipv4'].widget, CheckboxInput) + + def test_hides_default_disable_ipv4_if_no_managed_ipv6_configured(self): + nodegroup = factory.make_NodeGroup() + eth = factory.make_name('eth') + factory.make_NodeGroupInterface( + nodegroup, network=factory.make_ipv4_network(), interface=eth, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) + factory.make_NodeGroupInterface( + nodegroup, network=factory.make_ipv6_network(), interface=eth, + management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) + form = NodeGroupEdit(instance=nodegroup) + self.assertIsInstance( + form.fields['default_disable_ipv4'].widget, HiddenInput) + + def test_default_disable_ipv4_field_ignores_other_nodegroups(self): + factory.make_NodeGroupInterface( + factory.make_NodeGroup(), network=factory.make_ipv6_network(), + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + nodegroup = factory.make_NodeGroup() + form = NodeGroupEdit(instance=nodegroup) + self.assertIsInstance( + form.fields['default_disable_ipv4'].widget, HiddenInput) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_node.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_node.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_node.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_node.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,584 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for node forms.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from crochet import TimeoutError +from django.forms import ( + CheckboxInput, + HiddenInput, + ) +from maasserver import forms +from maasserver.clusterrpc.power_parameters import get_power_type_choices +from maasserver.clusterrpc.testing.osystems import ( + make_rpc_osystem, + make_rpc_release, + ) +from maasserver.enum import ( + NODEGROUP_STATUS, + NODEGROUPINTERFACE_MANAGEMENT, + ) +from maasserver.forms import ( + AdminNodeForm, + BLANK_CHOICE, + NO_ARCHITECTURES_AVAILABLE, + NodeForm, + pick_default_architecture, + ) +import maasserver.forms as forms_module +from maasserver.testing.architecture import ( + make_usable_architecture, + patch_usable_architectures, + ) +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.osystems import ( + make_osystem_with_releases, + make_usable_osystem, + patch_usable_osystems, + ) +from maasserver.testing.testcase import MAASServerTestCase +from maastesting.matchers import MockCalledOnceWith +from provisioningserver.rpc.exceptions import ( + NoConnectionsAvailable, + NoSuchOperatingSystem, + ) + + +class TestNodeForm(MAASServerTestCase): + + def test_contains_limited_set_of_fields(self): + form = NodeForm() + + self.assertEqual( + [ + 'hostname', + 'architecture', + 'osystem', + 'distro_series', + 'license_key', + 'disable_ipv4', + 'boot_type', + 'nodegroup' + ], list(form.fields)) + + def test_changes_node(self): + node = factory.make_Node() + hostname = factory.make_string() + patch_usable_architectures(self, [node.architecture]) + + form = NodeForm( + data={ + 'hostname': hostname, + 'architecture': make_usable_architecture(self), + }, + instance=node) + form.save() + + self.assertEqual(hostname, node.hostname) + + def test_accepts_usable_architecture(self): + arch = 
make_usable_architecture(self) + form = NodeForm(data={ + 'hostname': factory.make_name('host'), + 'architecture': arch, + }) + self.assertTrue(form.is_valid(), form._errors) + + def test_rejects_unusable_architecture(self): + patch_usable_architectures(self) + form = NodeForm(data={ + 'hostname': factory.make_name('host'), + 'architecture': factory.make_name('arch'), + }) + self.assertFalse(form.is_valid()) + self.assertItemsEqual(['architecture'], form._errors.keys()) + + def test_starts_with_default_architecture(self): + arches = sorted([factory.make_name('arch') for _ in range(5)]) + patch_usable_architectures(self, arches) + form = NodeForm() + self.assertEqual( + pick_default_architecture(arches), + form.fields['architecture'].initial) + + def test_adds_blank_default_when_no_arches_available(self): + patch_usable_architectures(self, []) + form = NodeForm() + self.assertEqual( + [BLANK_CHOICE], + form.fields['architecture'].choices) + + def test_adds_error_when_no_arches_available(self): + patch_usable_architectures(self, []) + form = NodeForm() + self.assertFalse(form.is_valid()) + self.assertEqual( + [NO_ARCHITECTURES_AVAILABLE], + form.errors['architecture']) + + def test_accepts_osystem(self): + self.client_log_in() + node = factory.make_Node(owner=self.logged_in_user) + osystem = make_usable_osystem(self) + form = NodeForm(data={ + 'hostname': factory.make_name('host'), + 'architecture': make_usable_architecture(self), + 'osystem': osystem['name'], + }, + instance=node) + self.assertTrue(form.is_valid(), form._errors) + + def test_rejects_invalid_osystem(self): + self.client_log_in() + node = factory.make_Node(owner=self.logged_in_user) + patch_usable_osystems(self) + form = NodeForm(data={ + 'hostname': factory.make_name('host'), + 'architecture': make_usable_architecture(self), + 'osystem': factory.make_name('os'), + }, + instance=node) + self.assertFalse(form.is_valid()) + self.assertItemsEqual(['osystem'], form._errors.keys()) + + def 
test_starts_with_default_osystem(self): + self.client_log_in() + node = factory.make_Node(owner=self.logged_in_user) + osystems = [make_osystem_with_releases(self) for _ in range(5)] + patch_usable_osystems(self, osystems) + form = NodeForm(instance=node) + self.assertEqual( + '', + form.fields['osystem'].initial) + + def test_accepts_osystem_distro_series(self): + self.client_log_in() + node = factory.make_Node(owner=self.logged_in_user) + osystem = make_usable_osystem(self) + release = osystem['default_release'] + form = NodeForm(data={ + 'hostname': factory.make_name('host'), + 'architecture': make_usable_architecture(self), + 'osystem': osystem['name'], + 'distro_series': '%s/%s' % (osystem['name'], release), + }, + instance=node) + self.assertTrue(form.is_valid(), form._errors) + + def test_rejects_invalid_osystem_distro_series(self): + self.client_log_in() + node = factory.make_Node(owner=self.logged_in_user) + osystem = make_usable_osystem(self) + release = factory.make_name('release') + form = NodeForm(data={ + 'hostname': factory.make_name('host'), + 'architecture': make_usable_architecture(self), + 'osystem': osystem['name'], + 'distro_series': '%s/%s' % (osystem['name'], release), + }, + instance=node) + self.assertFalse(form.is_valid()) + self.assertItemsEqual(['distro_series'], form._errors.keys()) + + def test_starts_with_default_distro_series(self): + self.client_log_in() + node = factory.make_Node(owner=self.logged_in_user) + osystems = [make_osystem_with_releases(self) for _ in range(5)] + patch_usable_osystems(self, osystems) + form = NodeForm(instance=node) + self.assertEqual( + '', + form.fields['distro_series'].initial) + + def test_rejects_mismatch_osystem_distro_series(self): + self.client_log_in() + node = factory.make_Node(owner=self.logged_in_user) + osystem = make_usable_osystem(self) + release = osystem['default_release'] + invalid = factory.make_name('invalid_os') + form = NodeForm(data={ + 'hostname': factory.make_name('host'), + 
'architecture': make_usable_architecture(self), + 'osystem': osystem['name'], + 'distro_series': '%s/%s' % (invalid, release), + }, + instance=node) + self.assertFalse(form.is_valid()) + self.assertItemsEqual(['distro_series'], form._errors.keys()) + + def test_rejects_when_validate_license_key_returns_False(self): + self.client_log_in() + node = factory.make_Node(owner=self.logged_in_user) + release = make_rpc_release(requires_license_key=True) + osystem = make_rpc_osystem(releases=[release]) + patch_usable_osystems(self, osystems=[osystem]) + license_key = factory.make_name('key') + mock_validate = self.patch(forms, 'validate_license_key') + mock_validate.return_value = False + form = NodeForm(data={ + 'hostname': factory.make_name('host'), + 'architecture': make_usable_architecture(self), + 'osystem': osystem['name'], + 'distro_series': '%s/%s*' % (osystem['name'], release['name']), + 'license_key': license_key, + }, + instance=node) + self.assertFalse(form.is_valid()) + self.assertItemsEqual(['license_key'], form._errors.keys()) + + def test_calls_validate_license_key_for_with_nodegroup(self): + self.client_log_in() + node = factory.make_Node(owner=self.logged_in_user) + release = make_rpc_release(requires_license_key=True) + osystem = make_rpc_osystem(releases=[release]) + patch_usable_osystems(self, osystems=[osystem]) + license_key = factory.make_name('key') + mock_validate_for = self.patch(forms, 'validate_license_key_for') + mock_validate_for.return_value = True + form = NodeForm(data={ + 'architecture': make_usable_architecture(self), + 'osystem': osystem['name'], + 'distro_series': '%s/%s*' % (osystem['name'], release['name']), + 'license_key': license_key, + }, + instance=node) + self.assertTrue(form.is_valid()) + self.assertThat( + mock_validate_for, + MockCalledOnceWith( + node.nodegroup, osystem['name'], release['name'], license_key)) + + def test_rejects_when_validate_license_key_for_returns_False(self): + self.client_log_in() + node = 
factory.make_Node(owner=self.logged_in_user) + release = make_rpc_release(requires_license_key=True) + osystem = make_rpc_osystem(releases=[release]) + patch_usable_osystems(self, osystems=[osystem]) + license_key = factory.make_name('key') + mock_validate_for = self.patch(forms, 'validate_license_key_for') + mock_validate_for.return_value = False + form = NodeForm(data={ + 'architecture': make_usable_architecture(self), + 'osystem': osystem['name'], + 'distro_series': '%s/%s*' % (osystem['name'], release['name']), + 'license_key': license_key, + }, + instance=node) + self.assertFalse(form.is_valid()) + self.assertItemsEqual(['license_key'], form._errors.keys()) + + def test_rejects_when_validate_license_key_for_raise_no_connection(self): + self.client_log_in() + node = factory.make_Node(owner=self.logged_in_user) + release = make_rpc_release(requires_license_key=True) + osystem = make_rpc_osystem(releases=[release]) + patch_usable_osystems(self, osystems=[osystem]) + license_key = factory.make_name('key') + mock_validate_for = self.patch(forms, 'validate_license_key_for') + mock_validate_for.side_effect = NoConnectionsAvailable() + form = NodeForm(data={ + 'architecture': make_usable_architecture(self), + 'osystem': osystem['name'], + 'distro_series': '%s/%s*' % (osystem['name'], release['name']), + 'license_key': license_key, + }, + instance=node) + self.assertFalse(form.is_valid()) + self.assertItemsEqual(['license_key'], form._errors.keys()) + + def test_rejects_when_validate_license_key_for_raise_timeout(self): + self.client_log_in() + node = factory.make_Node(owner=self.logged_in_user) + release = make_rpc_release(requires_license_key=True) + osystem = make_rpc_osystem(releases=[release]) + patch_usable_osystems(self, osystems=[osystem]) + license_key = factory.make_name('key') + mock_validate_for = self.patch(forms, 'validate_license_key_for') + mock_validate_for.side_effect = TimeoutError() + form = NodeForm(data={ + 'architecture': 
make_usable_architecture(self), + 'osystem': osystem['name'], + 'distro_series': '%s/%s*' % (osystem['name'], release['name']), + 'license_key': license_key, + }, + instance=node) + self.assertFalse(form.is_valid()) + self.assertItemsEqual(['license_key'], form._errors.keys()) + + def test_rejects_when_validate_license_key_for_raise_no_os(self): + self.client_log_in() + node = factory.make_Node(owner=self.logged_in_user) + release = make_rpc_release(requires_license_key=True) + osystem = make_rpc_osystem(releases=[release]) + patch_usable_osystems(self, osystems=[osystem]) + license_key = factory.make_name('key') + mock_validate_for = self.patch(forms, 'validate_license_key_for') + mock_validate_for.side_effect = NoSuchOperatingSystem() + form = NodeForm(data={ + 'architecture': make_usable_architecture(self), + 'osystem': osystem['name'], + 'distro_series': '%s/%s*' % (osystem['name'], release['name']), + 'license_key': license_key, + }, + instance=node) + self.assertFalse(form.is_valid()) + self.assertItemsEqual(['license_key'], form._errors.keys()) + + def test_rejects_duplicate_fqdn_with_unmanaged_dns_on_one_nodegroup(self): + # If a host with a given hostname exists on a managed nodegroup, + # new nodes on unmanaged nodegroups with hostnames that match + # that FQDN will be rejected. + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) + node = factory.make_Node( + hostname=factory.make_name("hostname"), nodegroup=nodegroup) + other_nodegroup = factory.make_NodeGroup() + form = NodeForm(data={ + 'nodegroup': other_nodegroup, + 'hostname': node.fqdn, + 'architecture': make_usable_architecture(self), + }) + form.instance.nodegroup = other_nodegroup + self.assertFalse(form.is_valid()) + + def test_rejects_duplicate_fqdn_on_same_nodegroup(self): + # If a node with a given FQDN exists on a managed nodegroup, new + # nodes on that nodegroup with duplicate FQDNs will be rejected. 
+ nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) + node = factory.make_Node( + hostname=factory.make_name("hostname"), nodegroup=nodegroup) + form = NodeForm(data={ + 'nodegroup': nodegroup, + 'hostname': node.fqdn, + 'architecture': make_usable_architecture(self), + }) + form.instance.nodegroup = nodegroup + self.assertFalse(form.is_valid()) + + def test_obeys_disable_ipv4_if_given(self): + setting = factory.pick_bool() + cluster = factory.make_NodeGroup(default_disable_ipv4=(not setting)) + form = NodeForm( + data={ + 'nodegroup': cluster, + 'architecture': make_usable_architecture(self), + 'disable_ipv4': setting, + }) + form.instance.nodegroup = cluster + node = form.save() + self.assertEqual(setting, node.disable_ipv4) + + def test_takes_missing_disable_ipv4_as_False_in_UI(self): + form = NodeForm( + instance=factory.make_Node(disable_ipv4=True), + data={ + 'architecture': make_usable_architecture(self), + 'ui_submission': True, + }) + node = form.save() + self.assertFalse(node.disable_ipv4) + + def test_takes_missing_disable_ipv4_as_Unchanged_in_API(self): + form = NodeForm( + instance=factory.make_Node(disable_ipv4=True), + data={ + 'architecture': make_usable_architecture(self), + }) + node = form.save() + self.assertTrue(node.disable_ipv4) + + def test_takes_True_disable_ipv4_from_cluster_by_default(self): + setting = True + cluster = factory.make_NodeGroup(default_disable_ipv4=setting) + form = NodeForm( + data={ + 'nodegroup': cluster, + 'architecture': make_usable_architecture(self), + }) + form.instance.nodegroup = cluster + node = form.save() + self.assertEqual(setting, node.disable_ipv4) + + def test_takes_False_disable_ipv4_from_cluster_by_default(self): + setting = False + cluster = factory.make_NodeGroup(default_disable_ipv4=setting) + form = NodeForm( + data={ + 'nodegroup': cluster, + 'architecture': make_usable_architecture(self), + }) + form.instance.nodegroup = 
cluster + node = form.save() + self.assertEqual(setting, node.disable_ipv4) + + def test_shows_disable_ipv4_if_IPv6_revealed_and_configured(self): + self.patch(forms_module, 'REVEAL_IPv6', True) + node = factory.make_node_with_mac_attached_to_nodegroupinterface() + factory.make_NodeGroupInterface( + node.nodegroup, network=factory.make_ipv6_network(), + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + form = NodeForm( + instance=node, + data={'architecture': make_usable_architecture(self)}) + self.assertIsInstance( + form.fields['disable_ipv4'].widget, CheckboxInput) + + def test_hides_disable_ipv4_if_IPv6_not_revealed(self): + self.patch(forms_module, 'REVEAL_IPv6', False) + node = factory.make_node_with_mac_attached_to_nodegroupinterface() + factory.make_NodeGroupInterface( + node.nodegroup, network=factory.make_ipv6_network(), + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + form = NodeForm( + instance=node, + data={'architecture': make_usable_architecture(self)}) + self.assertIsInstance(form.fields['disable_ipv4'].widget, HiddenInput) + + def test_hides_disable_ipv4_if_IPv6_not_configured(self): + self.patch(forms_module, 'REVEAL_IPv6', True) + node = factory.make_node_with_mac_attached_to_nodegroupinterface() + factory.make_NodeGroupInterface( + node.nodegroup, network=factory.make_ipv6_network(), + management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) + form = NodeForm( + instance=node, + data={'architecture': make_usable_architecture(self)}) + self.assertIsInstance(form.fields['disable_ipv4'].widget, HiddenInput) + + def test_shows_disable_ipv4_on_new_node_if_any_cluster_supports_it(self): + self.patch(forms_module, 'REVEAL_IPv6', True) + factory.make_NodeGroupInterface( + factory.make_NodeGroup(), network=factory.make_ipv6_network(), + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + form = NodeForm(data={'architecture': make_usable_architecture(self)}) + self.assertIsInstance( + form.fields['disable_ipv4'].widget, CheckboxInput) + + def 
test_hides_disable_ipv4_on_new_node_if_no_cluster_supports_it(self): + self.patch(forms_module, 'REVEAL_IPv6', True) + factory.make_NodeGroupInterface( + factory.make_NodeGroup(), network=factory.make_ipv6_network(), + management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) + form = NodeForm(data={'architecture': make_usable_architecture(self)}) + self.assertIsInstance(form.fields['disable_ipv4'].widget, HiddenInput) + + +class TestAdminNodeForm(MAASServerTestCase): + + def test_AdminNodeForm_contains_limited_set_of_fields(self): + self.client_log_in() + node = factory.make_Node(owner=self.logged_in_user) + form = AdminNodeForm(instance=node) + + self.assertEqual( + [ + 'hostname', + 'architecture', + 'osystem', + 'distro_series', + 'license_key', + 'disable_ipv4', + 'boot_type', + 'power_type', + 'power_parameters', + 'cpu_count', + 'memory', + 'storage', + 'zone', + ], + list(form.fields)) + + def test_AdminNodeForm_initialises_zone(self): + # The zone field uses "to_field_name", so that it can refer to a zone + # by name instead of by ID. A bug in Django breaks initialisation + # from an instance: the field tries to initialise the field using a + # zone's ID instead of its name, and ends up reverting to the default. + # The code must work around this bug. + zone = factory.make_Zone() + node = factory.make_Node(zone=zone) + # We'll create a form that makes a change, but not to the zone. + data = {'hostname': factory.make_name('host')} + form = AdminNodeForm(instance=node, data=data) + # The Django bug would stop the initial field value from being set, + # but the workaround ensures that it is initialised. 
+ self.assertEqual(zone.name, form.initial['zone']) + + def test_AdminNodeForm_changes_node(self): + node = factory.make_Node() + zone = factory.make_Zone() + hostname = factory.make_string() + power_type = factory.pick_power_type() + form = AdminNodeForm( + data={ + 'hostname': hostname, + 'power_type': power_type, + 'architecture': make_usable_architecture(self), + 'zone': zone.name, + }, + instance=node) + form.save() + + node = reload_object(node) + self.assertEqual( + (node.hostname, node.power_type, node.zone), + (hostname, power_type, zone)) + + def test_AdminNodeForm_populates_power_type_choices(self): + form = AdminNodeForm() + self.assertEqual( + [''] + [choice[0] for choice in get_power_type_choices()], + [choice[0] for choice in form.fields['power_type'].choices]) + + def test_AdminNodeForm_populates_power_type_initial(self): + node = factory.make_Node() + form = AdminNodeForm(instance=node) + self.assertEqual(node.power_type, form.fields['power_type'].initial) + + def test_AdminNodeForm_changes_node_with_skip_check(self): + node = factory.make_Node() + hostname = factory.make_string() + power_type = factory.pick_power_type() + power_parameters_field = factory.make_string() + arch = make_usable_architecture(self) + form = AdminNodeForm( + data={ + 'hostname': hostname, + 'architecture': arch, + 'power_type': power_type, + 'power_parameters_field': power_parameters_field, + 'power_parameters_skip_check': True, + }, + instance=node) + form.save() + + self.assertEqual( + (hostname, power_type, {'field': power_parameters_field}), + (node.hostname, node.power_type, node.power_parameters)) + + def test_AdminForm_does_not_permit_nodegroup_change(self): + # We had to make Node.nodegroup editable to get Django to + # validate it as non-blankable, but that doesn't mean that we + # actually want to allow people to edit it through API or UI. 
+ old_nodegroup = factory.make_NodeGroup() + node = factory.make_Node( + nodegroup=old_nodegroup, + architecture=make_usable_architecture(self)) + new_nodegroup = factory.make_NodeGroup() + AdminNodeForm(data={'nodegroup': new_nodegroup}, instance=node).save() + # The form saved without error, but the nodegroup change was ignored. + self.assertEqual(old_nodegroup, node.nodegroup) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_nodewithmacaddresses.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_nodewithmacaddresses.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_nodewithmacaddresses.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_nodewithmacaddresses.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,160 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `NodeWithMACAddressesForm`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from django.http import QueryDict +from maasserver.forms import NodeWithMACAddressesForm +from maasserver.models import NodeGroup +from maasserver.testing.architecture import ( + make_usable_architecture, + patch_usable_architectures, + ) +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.testcase import MAASServerTestCase +from netaddr import IPNetwork + + +class NodeWithMACAddressesFormTest(MAASServerTestCase): + + def get_QueryDict(self, params): + query_dict = QueryDict('', mutable=True) + for k, v in params.items(): + if isinstance(v, list): + query_dict.setlist(k, v) + else: + query_dict[k] = v + return query_dict + + def make_params(self, mac_addresses=None, architecture=None, + hostname=None, nodegroup=None): + if mac_addresses is None: + mac_addresses = [factory.make_mac_address()] + 
if architecture is None: + architecture = factory.make_name('arch') + if hostname is None: + hostname = factory.make_name('hostname') + params = { + 'mac_addresses': mac_addresses, + 'architecture': architecture, + 'hostname': hostname, + } + if nodegroup is not None: + params['nodegroup'] = nodegroup + # Make sure that the architecture parameter is acceptable. + patch_usable_architectures(self, [architecture]) + return self.get_QueryDict(params) + + def test_NodeWithMACAddressesForm_valid(self): + architecture = make_usable_architecture(self) + form = NodeWithMACAddressesForm( + data=self.make_params( + mac_addresses=['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'], + architecture=architecture)) + + self.assertTrue(form.is_valid(), form.errors) + self.assertEqual( + ['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'], + form.cleaned_data['mac_addresses']) + self.assertEqual(architecture, form.cleaned_data['architecture']) + + def test_NodeWithMACAddressesForm_simple_invalid(self): + # If the form only has one (invalid) MAC address field to validate, + # the error message in form.errors['mac_addresses'] is the + # message from the field's validation error. + form = NodeWithMACAddressesForm( + data=self.make_params(mac_addresses=['invalid'])) + + self.assertFalse(form.is_valid()) + self.assertEqual(['mac_addresses'], list(form.errors)) + self.assertEqual( + ["'invalid' is not a valid MAC address."], + form.errors['mac_addresses']) + + def test_NodeWithMACAddressesForm_multiple_invalid(self): + # If the form has multiple MAC address fields to validate, + # if one or more fields are invalid, a single error message is + # present in form.errors['mac_addresses'] after validation. + form = NodeWithMACAddressesForm( + data=self.make_params(mac_addresses=['invalid_1', 'invalid_2'])) + + self.assertFalse(form.is_valid()) + self.assertEqual(['mac_addresses'], list(form.errors)) + self.assertEqual( + [ + "One or more MAC addresses is invalid. " + "('invalid_1' is not a valid MAC address. 
\u2014" + " 'invalid_2' is not a valid MAC address.)" + ], + form.errors['mac_addresses']) + + def test_NodeWithMACAddressesForm_empty(self): + # Empty values in the list of MAC addresses are simply ignored. + form = NodeWithMACAddressesForm( + data=self.make_params( + mac_addresses=[factory.make_mac_address(), ''])) + + self.assertTrue(form.is_valid()) + + def test_NodeWithMACAddressesForm_save(self): + macs = ['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'] + form = NodeWithMACAddressesForm( + data=self.make_params(mac_addresses=macs)) + node = form.save() + + self.assertIsNotNone(node.id) # The node is persisted. + self.assertSequenceEqual( + macs, + [mac.mac_address for mac in node.macaddress_set.all()]) + + def test_includes_nodegroup_field_for_new_node(self): + self.assertIn( + 'nodegroup', + NodeWithMACAddressesForm(data=self.make_params()).fields) + + def test_does_not_include_nodegroup_field_for_existing_node(self): + params = self.make_params() + node = factory.make_Node() + self.assertNotIn( + 'nodegroup', + NodeWithMACAddressesForm(data=params, instance=node).fields) + + def test_sets_nodegroup_to_master_by_default(self): + self.assertEqual( + NodeGroup.objects.ensure_master(), + NodeWithMACAddressesForm(data=self.make_params()).save().nodegroup) + + def test_leaves_nodegroup_alone_if_unset_on_existing_node(self): + # Selecting a node group for a node is only supported on new + # nodes. You can't change it later. 
+ original_nodegroup = factory.make_NodeGroup() + node = factory.make_Node(nodegroup=original_nodegroup) + factory.make_NodeGroup(network=IPNetwork("192.168.1.0/24")) + form = NodeWithMACAddressesForm( + data=self.make_params(nodegroup='192.168.1.0'), instance=node) + form.save() + self.assertEqual(original_nodegroup, reload_object(node).nodegroup) + + def test_form_without_hostname_generates_hostname(self): + form = NodeWithMACAddressesForm(data=self.make_params(hostname='')) + node = form.save() + self.assertTrue(len(node.hostname) > 0) + + def test_form_with_ip_based_hostname_generates_hostname(self): + ip_based_hostname = '192-168-12-10.domain' + form = NodeWithMACAddressesForm( + data=self.make_params(hostname=ip_based_hostname)) + node = form.save() + self.assertNotEqual(ip_based_hostname, node.hostname) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1800 +0,0 @@ -# Copyright 2012-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Test forms.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import json - -from django import forms -from django.conf import settings -from django.contrib.auth.models import User -from django.core.exceptions import ValidationError -from django.core.files.uploadedfile import SimpleUploadedFile -from django.core.validators import validate_email -from django.http import QueryDict -from maasserver.clusterrpc.power_parameters import get_power_type_choices -from maasserver.enum import ( - NODE_STATUS, - NODEGROUP_STATUS, - NODEGROUPINTERFACE_MANAGEMENT, - ) -from maasserver.forms import ( - AdminNodeForm, - AdminNodeWithMACAddressesForm, - BLANK_CHOICE, - BulkNodeActionForm, - CommissioningForm, - CommissioningScriptForm, - ConfigForm, - DownloadProgressForm, - EditUserForm, - get_action_form, - get_node_create_form, - get_node_edit_form, - initialize_node_group, - InstanceListField, - INTERFACES_VALIDATION_ERROR_MESSAGE, - list_all_usable_architectures, - MACAddressForm, - NetworkForm, - NewUserCreationForm, - NO_ARCHITECTURES_AVAILABLE, - NodeActionForm, - NodeForm, - NodeGroupEdit, - NodeGroupInterfaceForeignDHCPForm, - NodeGroupInterfaceForm, - NodeGroupWithInterfacesForm, - NodeWithMACAddressesForm, - pick_default_architecture, - ProfileForm, - remove_None_values, - SetZoneBulkAction, - UnconstrainedMultipleChoiceField, - validate_nonoverlapping_networks, - ValidatorMultipleChoiceField, - ZoneForm, - ) -from maasserver.models import ( - Config, - MACAddress, - Network, - Node, - NodeGroup, - NodeGroupInterface, - Zone, - ) -from maasserver.models.config import DEFAULT_CONFIG -from maasserver.node_action import ( - Commission, - Delete, - StartNode, - StopNode, - UseCurtin, - ) -from maasserver.testing import reload_object -from maasserver.testing.architecture import ( - make_usable_architecture, - patch_usable_architectures, - ) -from maasserver.testing.factory 
import factory -from maasserver.testing.testcase import MAASServerTestCase -from maasserver.utils import map_enum -from maasserver.utils.forms import compose_invalid_choice_text -from metadataserver.models import CommissioningScript -from netaddr import IPNetwork -from provisioningserver import tasks -from testtools import TestCase -from testtools.matchers import ( - AllMatch, - Contains, - Equals, - MatchesAll, - MatchesRegex, - MatchesStructure, - StartsWith, - ) - - -class TestHelpers(MAASServerTestCase): - - def make_usable_boot_images(self, nodegroup=None, arch=None, - subarchitecture=None): - """Create a set of boot images, so the architecture becomes "usable". - - This will make the images' architecture show up in the list of usable - architecture. - - Nothing is returned. - """ - if nodegroup is None: - nodegroup = factory.make_node_group() - if arch is None: - arch = factory.make_name('arch') - if subarchitecture is None: - subarchitecture = factory.make_name('subarch') - for purpose in ['install', 'commissioning']: - factory.make_boot_image( - nodegroup=nodegroup, architecture=arch, - subarchitecture=subarchitecture, purpose=purpose) - - def test_initialize_node_group_leaves_nodegroup_reference_intact(self): - preselected_nodegroup = factory.make_node_group() - node = factory.make_node(nodegroup=preselected_nodegroup) - initialize_node_group(node) - self.assertEqual(preselected_nodegroup, node.nodegroup) - - def test_initialize_node_group_initializes_nodegroup_to_form_value(self): - node = Node( - NODE_STATUS.DECLARED, architecture=make_usable_architecture(self)) - nodegroup = factory.make_node_group() - initialize_node_group(node, nodegroup) - self.assertEqual(nodegroup, node.nodegroup) - - def test_initialize_node_group_defaults_to_master(self): - node = Node( - NODE_STATUS.DECLARED, - architecture=make_usable_architecture(self)) - initialize_node_group(node) - self.assertEqual(NodeGroup.objects.ensure_master(), node.nodegroup) - - def 
test_list_all_usable_architectures_combines_nodegroups(self): - arches = [ - (factory.make_name('arch'), factory.make_name('subarch')) - for _ in range(3)] - for arch, subarch in arches: - self.make_usable_boot_images(arch=arch, subarchitecture=subarch) - expected = [ - "%s/%s" % (arch, subarch) for arch, subarch in arches] - self.assertItemsEqual(expected, list_all_usable_architectures()) - - def test_list_all_usable_architectures_sorts_output(self): - arches = [ - (factory.make_name('arch'), factory.make_name('subarch')) - for _ in range(3)] - for arch, subarch in arches: - self.make_usable_boot_images(arch=arch, subarchitecture=subarch) - expected = [ - "%s/%s" % (arch, subarch) for arch, subarch in arches] - self.assertEqual(sorted(expected), list_all_usable_architectures()) - - def test_list_all_usable_architectures_returns_no_duplicates(self): - arch = factory.make_name('arch') - subarch = factory.make_name('subarch') - self.make_usable_boot_images(arch=arch, subarchitecture=subarch) - self.make_usable_boot_images(arch=arch, subarchitecture=subarch) - self.assertEqual( - ["%s/%s" % (arch, subarch)], list_all_usable_architectures()) - - def test_pick_default_architecture_returns_empty_if_no_options(self): - self.assertEqual('', pick_default_architecture([])) - - def test_pick_default_architecture_prefers_i386_generic_if_usable(self): - self.assertEqual( - 'i386/generic', - pick_default_architecture( - ['amd64/generic', 'i386/generic', 'mips/generic'])) - - def test_pick_default_architecture_falls_back_to_first_option(self): - arches = [factory.make_name('arch') for _ in range(5)] - self.assertEqual(arches[0], pick_default_architecture(arches)) - - def test_remove_None_values_removes_None_values_in_dict(self): - random_input = factory.getRandomString() - self.assertEqual( - {random_input: random_input}, - remove_None_values({ - random_input: random_input, - factory.getRandomString(): None - })) - - def test_remove_None_values_leaves_empty_dict_untouched(self): 
- self.assertEqual({}, remove_None_values({})) - - def test_get_node_edit_form_returns_NodeForm_if_non_admin(self): - user = factory.make_user() - self.assertEqual(NodeForm, get_node_edit_form(user)) - - def test_get_node_edit_form_returns_APIAdminNodeEdit_if_admin(self): - admin = factory.make_admin() - self.assertEqual(AdminNodeForm, get_node_edit_form(admin)) - - def test_get_node_create_form_if_non_admin(self): - user = factory.make_user() - self.assertEqual( - NodeWithMACAddressesForm, get_node_create_form(user)) - - def test_get_node_create_form_if_admin(self): - admin = factory.make_admin() - self.assertEqual( - AdminNodeWithMACAddressesForm, get_node_create_form(admin)) - - -class NodeWithMACAddressesFormTest(MAASServerTestCase): - - def get_QueryDict(self, params): - query_dict = QueryDict('', mutable=True) - for k, v in params.items(): - if isinstance(v, list): - query_dict.setlist(k, v) - else: - query_dict[k] = v - return query_dict - - def make_params(self, mac_addresses=None, architecture=None, - hostname=None, nodegroup=None): - if mac_addresses is None: - mac_addresses = [factory.getRandomMACAddress()] - if architecture is None: - architecture = factory.make_name('arch') - if hostname is None: - hostname = factory.make_name('hostname') - params = { - 'mac_addresses': mac_addresses, - 'architecture': architecture, - 'hostname': hostname, - } - if nodegroup is not None: - params['nodegroup'] = nodegroup - # Make sure that the architecture parameter is acceptable. 
- patch_usable_architectures(self, [architecture]) - return self.get_QueryDict(params) - - def test_NodeWithMACAddressesForm_valid(self): - architecture = make_usable_architecture(self) - form = NodeWithMACAddressesForm( - data=self.make_params( - mac_addresses=['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'], - architecture=architecture)) - - self.assertTrue(form.is_valid(), form.errors) - self.assertEqual( - ['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'], - form.cleaned_data['mac_addresses']) - self.assertEqual(architecture, form.cleaned_data['architecture']) - - def test_NodeWithMACAddressesForm_simple_invalid(self): - # If the form only has one (invalid) MAC address field to validate, - # the error message in form.errors['mac_addresses'] is the - # message from the field's validation error. - form = NodeWithMACAddressesForm( - data=self.make_params(mac_addresses=['invalid'])) - - self.assertFalse(form.is_valid()) - self.assertEqual(['mac_addresses'], list(form.errors)) - self.assertEqual( - ['Enter a valid MAC address (e.g. AA:BB:CC:DD:EE:FF).'], - form.errors['mac_addresses']) - - def test_NodeWithMACAddressesForm_multiple_invalid(self): - # If the form has multiple MAC address fields to validate, - # if one or more fields are invalid, a single error message is - # present in form.errors['mac_addresses'] after validation. - form = NodeWithMACAddressesForm( - data=self.make_params(mac_addresses=['invalid_1', 'invalid_2'])) - - self.assertFalse(form.is_valid()) - self.assertEqual(['mac_addresses'], list(form.errors)) - self.assertEqual( - ['One or more MAC addresses is invalid.'], - form.errors['mac_addresses']) - - def test_NodeWithMACAddressesForm_empty(self): - # Empty values in the list of MAC addresses are simply ignored. 
- form = NodeWithMACAddressesForm( - data=self.make_params( - mac_addresses=[factory.getRandomMACAddress(), ''])) - - self.assertTrue(form.is_valid()) - - def test_NodeWithMACAddressesForm_save(self): - macs = ['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'] - form = NodeWithMACAddressesForm( - data=self.make_params(mac_addresses=macs)) - node = form.save() - - self.assertIsNotNone(node.id) # The node is persisted. - self.assertSequenceEqual( - macs, - [mac.mac_address for mac in node.macaddress_set.all()]) - - def test_includes_nodegroup_field_for_new_node(self): - self.assertIn( - 'nodegroup', - NodeWithMACAddressesForm(data=self.make_params()).fields) - - def test_does_not_include_nodegroup_field_for_existing_node(self): - params = self.make_params() - node = factory.make_node() - self.assertNotIn( - 'nodegroup', - NodeWithMACAddressesForm(data=params, instance=node).fields) - - def test_sets_nodegroup_to_master_by_default(self): - self.assertEqual( - NodeGroup.objects.ensure_master(), - NodeWithMACAddressesForm(data=self.make_params()).save().nodegroup) - - def test_leaves_nodegroup_alone_if_unset_on_existing_node(self): - # Selecting a node group for a node is only supported on new - # nodes. You can't change it later. 
- original_nodegroup = factory.make_node_group() - node = factory.make_node(nodegroup=original_nodegroup) - factory.make_node_group(network=IPNetwork("192.168.1.0/24")) - form = NodeWithMACAddressesForm( - data=self.make_params(nodegroup='192.168.1.0'), instance=node) - form.save() - self.assertEqual(original_nodegroup, reload_object(node).nodegroup) - - def test_form_without_hostname_generates_hostname(self): - form = NodeWithMACAddressesForm(data=self.make_params(hostname='')) - node = form.save() - self.assertTrue(len(node.hostname) > 0) - - def test_form_with_ip_based_hostname_generates_hostname(self): - ip_based_hostname = '192-168-12-10.domain' - form = NodeWithMACAddressesForm( - data=self.make_params(hostname=ip_based_hostname)) - node = form.save() - self.assertNotEqual(ip_based_hostname, node.hostname) - - -class TestOptionForm(ConfigForm): - field1 = forms.CharField(label="Field 1", max_length=10) - field2 = forms.BooleanField(label="Field 2", required=False) - - -class TestValidOptionForm(ConfigForm): - maas_name = forms.CharField(label="Field 1", max_length=10) - - -class ConfigFormTest(MAASServerTestCase): - - def test_form_valid_saves_into_db(self): - value = factory.getRandomString(10) - form = TestValidOptionForm({'maas_name': value}) - result = form.save() - - self.assertTrue(result) - self.assertEqual(value, Config.objects.get_config('maas_name')) - - def test_form_rejects_unknown_settings(self): - value = factory.getRandomString(10) - value2 = factory.getRandomString(10) - form = TestOptionForm({'field1': value, 'field2': value2}) - valid = form.is_valid() - - self.assertFalse(valid) - self.assertIn('field1', form._errors) - self.assertIn('field2', form._errors) - - def test_form_invalid_does_not_save_into_db(self): - value_too_long = factory.getRandomString(20) - form = TestOptionForm({'field1': value_too_long, 'field2': False}) - result = form.save() - - self.assertFalse(result) - self.assertIn('field1', form._errors) - 
self.assertIsNone(Config.objects.get_config('field1')) - self.assertIsNone(Config.objects.get_config('field2')) - - def test_form_loads_initial_values(self): - value = factory.getRandomString() - Config.objects.set_config('field1', value) - form = TestOptionForm() - - self.assertItemsEqual(['field1'], form.initial) - self.assertEqual(value, form.initial['field1']) - - def test_form_loads_initial_values_from_default_value(self): - value = factory.getRandomString() - DEFAULT_CONFIG['field1'] = value - form = TestOptionForm() - - self.assertItemsEqual(['field1'], form.initial) - self.assertEqual(value, form.initial['field1']) - - -class TestNodeForm(MAASServerTestCase): - - def test_contains_limited_set_of_fields(self): - form = NodeForm() - - self.assertEqual( - [ - 'hostname', - 'architecture', - 'distro_series', - 'nodegroup', - ], list(form.fields)) - - def test_changes_node(self): - node = factory.make_node() - hostname = factory.getRandomString() - patch_usable_architectures(self, [node.architecture]) - - form = NodeForm( - data={ - 'hostname': hostname, - 'architecture': make_usable_architecture(self), - }, - instance=node) - form.save() - - self.assertEqual(hostname, node.hostname) - - def test_accepts_usable_architecture(self): - arch = make_usable_architecture(self) - form = NodeForm(data={ - 'hostname': factory.make_name('host'), - 'architecture': arch, - }) - self.assertTrue(form.is_valid(), form._errors) - - def test_rejects_unusable_architecture(self): - patch_usable_architectures(self) - form = NodeForm(data={ - 'hostname': factory.make_name('host'), - 'architecture': factory.make_name('arch'), - }) - self.assertFalse(form.is_valid()) - self.assertItemsEqual(['architecture'], form._errors.keys()) - - def test_starts_with_default_architecture(self): - arches = sorted([factory.make_name('arch') for _ in range(5)]) - patch_usable_architectures(self, arches) - form = NodeForm() - self.assertEqual( - pick_default_architecture(arches), - 
form.fields['architecture'].initial) - - def test_adds_blank_default_when_no_arches_available(self): - patch_usable_architectures(self, []) - form = NodeForm() - self.assertEqual( - [BLANK_CHOICE], - form.fields['architecture'].choices) - - def test_adds_error_when_no_arches_available(self): - patch_usable_architectures(self, []) - form = NodeForm() - self.assertFalse(form.is_valid()) - self.assertEqual( - [NO_ARCHITECTURES_AVAILABLE], - form.errors['architecture']) - - -class TestAdminNodeForm(MAASServerTestCase): - - def test_AdminNodeForm_contains_limited_set_of_fields(self): - node = factory.make_node() - form = AdminNodeForm(instance=node) - - self.assertEqual( - [ - 'hostname', - 'architecture', - 'distro_series', - 'power_type', - 'power_parameters', - 'cpu_count', - 'memory', - 'storage', - 'zone', - ], - list(form.fields)) - - def test_AdminNodeForm_initialises_zone(self): - # The zone field uses "to_field_name", so that it can refer to a zone - # by name instead of by ID. A bug in Django breaks initialisation - # from an instance: the field tries to initialise the field using a - # zone's ID instead of its name, and ends up reverting to the default. - # The code must work around this bug. - zone = factory.make_zone() - node = factory.make_node(zone=zone) - # We'll create a form that makes a change, but not to the zone. - data = {'hostname': factory.make_name('host')} - form = AdminNodeForm(instance=node, data=data) - # The Django bug would stop the initial field value from being set, - # but the workaround ensures that it is initialised. 
- self.assertEqual(zone.name, form.initial['zone']) - - def test_AdminNodeForm_changes_node(self): - node = factory.make_node() - zone = factory.make_zone() - hostname = factory.getRandomString() - power_type = factory.getRandomPowerType() - form = AdminNodeForm( - data={ - 'hostname': hostname, - 'power_type': power_type, - 'architecture': make_usable_architecture(self), - 'zone': zone.name, - }, - instance=node) - form.save() - - node = reload_object(node) - self.assertEqual( - (node.hostname, node.power_type, node.zone), - (hostname, power_type, zone)) - - def test_AdminNodeForm_refuses_to_update_hostname_on_allocated_node(self): - old_name = factory.make_name('old-hostname') - new_name = factory.make_name('new-hostname') - node = factory.make_node( - hostname=old_name, status=NODE_STATUS.ALLOCATED) - form = AdminNodeForm( - data={ - 'hostname': new_name, - 'architecture': node.architecture, - }, - instance=node) - self.assertFalse(form.is_valid()) - self.assertEqual( - ["Can't change hostname to %s: node is in use." 
% new_name], - form._errors['hostname']) - - def test_AdminNodeForm_accepts_unchanged_hostname_on_allocated_node(self): - old_name = factory.make_name('old-hostname') - node = factory.make_node( - hostname=old_name, status=NODE_STATUS.ALLOCATED) - patch_usable_architectures(self, [node.architecture]) - form = AdminNodeForm( - data={ - 'hostname': old_name, - 'architecture': node.architecture, - }, - instance=node) - self.assertTrue(form.is_valid(), form._errors) - form.save() - self.assertEqual(old_name, reload_object(node).hostname) - - def test_AdminNodeForm_populates_power_type_choices(self): - form = AdminNodeForm() - self.assertEqual( - [''] + [choice[0] for choice in get_power_type_choices()], - [choice[0] for choice in form.fields['power_type'].choices]) - - def test_AdminNodeForm_populates_power_type_initial(self): - node = factory.make_node() - form = AdminNodeForm(instance=node) - self.assertEqual(node.power_type, form.fields['power_type'].initial) - - def test_AdminNodeForm_changes_node_with_skip_check(self): - node = factory.make_node() - hostname = factory.getRandomString() - power_type = factory.getRandomPowerType() - power_parameters_field = factory.getRandomString() - arch = make_usable_architecture(self) - form = AdminNodeForm( - data={ - 'hostname': hostname, - 'architecture': arch, - 'power_type': power_type, - 'power_parameters_field': power_parameters_field, - 'power_parameters_skip_check': True, - }, - instance=node) - form.save() - - self.assertEqual( - (hostname, power_type, {'field': power_parameters_field}), - (node.hostname, node.power_type, node.power_parameters)) - - def test_AdminForm_does_not_permit_nodegroup_change(self): - # We had to make Node.nodegroup editable to get Django to - # validate it as non-blankable, but that doesn't mean that we - # actually want to allow people to edit it through API or UI. 
- old_nodegroup = factory.make_node_group() - node = factory.make_node( - nodegroup=old_nodegroup, - architecture=make_usable_architecture(self)) - new_nodegroup = factory.make_node_group() - AdminNodeForm(data={'nodegroup': new_nodegroup}, instance=node).save() - # The form saved without error, but the nodegroup change was ignored. - self.assertEqual(old_nodegroup, node.nodegroup) - - -class TestNodeActionForm(MAASServerTestCase): - - def test_get_action_form_creates_form_class_with_attributes(self): - user = factory.make_admin() - form_class = get_action_form(user) - - self.assertEqual(user, form_class.user) - - def test_get_action_form_creates_form_class(self): - user = factory.make_admin() - node = factory.make_node(status=NODE_STATUS.DECLARED) - form = get_action_form(user)(node) - - self.assertIsInstance(form, NodeActionForm) - self.assertEqual(node, form.node) - - def test_get_action_form_for_admin(self): - admin = factory.make_admin() - node = factory.make_node(status=NODE_STATUS.DECLARED) - node.use_traditional_installer() - form = get_action_form(admin)(node) - - self.assertItemsEqual( - [Commission.name, Delete.name, UseCurtin.name], - form.actions) - - def test_get_action_form_for_user(self): - user = factory.make_user() - node = factory.make_node(status=NODE_STATUS.DECLARED) - form = get_action_form(user)(node) - - self.assertIsInstance(form, NodeActionForm) - self.assertEqual(node, form.node) - self.assertItemsEqual({}, form.actions) - - def test_save_performs_requested_action(self): - admin = factory.make_admin() - node = factory.make_node(status=NODE_STATUS.DECLARED) - form = get_action_form(admin)( - node, {NodeActionForm.input_name: Commission.name}) - self.assertTrue(form.is_valid()) - form.save() - self.assertEqual(NODE_STATUS.COMMISSIONING, node.status) - - def test_rejects_disallowed_action(self): - user = factory.make_user() - node = factory.make_node(status=NODE_STATUS.DECLARED) - form = get_action_form(user)( - node, 
{NodeActionForm.input_name: Commission.name}) - self.assertFalse(form.is_valid()) - self.assertEquals( - {'action': ['Not a permitted action: %s.' % Commission.name]}, - form._errors) - - def test_rejects_unknown_action(self): - user = factory.make_user() - node = factory.make_node(status=NODE_STATUS.DECLARED) - action = factory.getRandomString() - form = get_action_form(user)( - node, {NodeActionForm.input_name: action}) - self.assertFalse(form.is_valid()) - self.assertIn( - "is not one of the available choices.", form._errors['action'][0]) - - -class TestUniqueEmailForms(MAASServerTestCase): - - def assertFormFailsValidationBecauseEmailNotUnique(self, form): - self.assertFalse(form.is_valid()) - self.assertIn('email', form._errors) - self.assertEquals(1, len(form._errors['email'])) - # Cope with 'Email' and 'E-mail' in error message. - self.assertThat( - form._errors['email'][0], - MatchesRegex( - r'User with this E-{0,1}mail address already exists.')) - - def test_ProfileForm_fails_validation_if_email_taken(self): - another_email = '%s@example.com' % factory.getRandomString() - factory.make_user(email=another_email) - email = '%s@example.com' % factory.getRandomString() - user = factory.make_user(email=email) - form = ProfileForm(instance=user, data={'email': another_email}) - self.assertFormFailsValidationBecauseEmailNotUnique(form) - - def test_ProfileForm_validates_if_email_unchanged(self): - email = '%s@example.com' % factory.getRandomString() - user = factory.make_user(email=email) - form = ProfileForm(instance=user, data={'email': email}) - self.assertTrue(form.is_valid()) - - def test_NewUserCreationForm_fails_validation_if_email_taken(self): - email = '%s@example.com' % factory.getRandomString() - username = factory.getRandomString() - password = factory.getRandomString() - factory.make_user(email=email) - form = NewUserCreationForm( - { - 'email': email, - 'username': username, - 'password1': password, - 'password2': password, - }) - 
self.assertFormFailsValidationBecauseEmailNotUnique(form) - - def test_EditUserForm_fails_validation_if_email_taken(self): - another_email = '%s@example.com' % factory.getRandomString() - factory.make_user(email=another_email) - email = '%s@example.com' % factory.getRandomString() - user = factory.make_user(email=email) - form = EditUserForm(instance=user, data={'email': another_email}) - self.assertFormFailsValidationBecauseEmailNotUnique(form) - - def test_EditUserForm_validates_if_email_unchanged(self): - email = '%s@example.com' % factory.getRandomString() - user = factory.make_user(email=email) - form = EditUserForm( - instance=user, - data={ - 'email': email, - 'username': factory.getRandomString(), - }) - self.assertTrue(form.is_valid()) - - -class TestNewUserCreationForm(MAASServerTestCase): - - def test_saves_to_db_by_default(self): - password = factory.make_name('password') - params = { - 'email': '%s@example.com' % factory.getRandomString(), - 'username': factory.make_name('user'), - 'password1': password, - 'password2': password, - } - form = NewUserCreationForm(params) - form.save() - self.assertIsNotNone(User.objects.get(username=params['username'])) - - def test_email_is_required(self): - password = factory.make_name('password') - params = { - 'email': '', - 'username': factory.make_name('user'), - 'password1': password, - 'password2': password, - } - form = NewUserCreationForm(params) - self.assertFalse(form.is_valid()) - self.assertEquals( - {'email': ['This field is required.']}, - form._errors) - - def test_does_not_save_to_db_if_commit_is_False(self): - password = factory.make_name('password') - params = { - 'email': '%s@example.com' % factory.getRandomString(), - 'username': factory.make_name('user'), - 'password1': password, - 'password2': password, - } - form = NewUserCreationForm(params) - form.save(commit=False) - self.assertItemsEqual( - [], User.objects.filter(username=params['username'])) - - def test_fields_order(self): - form = 
NewUserCreationForm() - - self.assertEqual( - ['username', 'last_name', 'email', 'password1', 'password2', - 'is_superuser'], - list(form.fields)) - - -class TestMACAddressForm(MAASServerTestCase): - - def test_MACAddressForm_creates_mac_address(self): - node = factory.make_node() - mac = factory.getRandomMACAddress() - form = MACAddressForm(node=node, data={'mac_address': mac}) - form.save() - self.assertTrue( - MACAddress.objects.filter(node=node, mac_address=mac).exists()) - - def test_saves_to_db_by_default(self): - node = factory.make_node() - mac = factory.getRandomMACAddress() - form = MACAddressForm(node=node, data={'mac_address': mac}) - form.save() - self.assertEqual( - mac, MACAddress.objects.get(mac_address=mac).mac_address) - - def test_does_not_save_to_db_if_commit_is_False(self): - node = factory.make_node() - mac = factory.getRandomMACAddress() - form = MACAddressForm(node=node, data={'mac_address': mac}) - form.save(commit=False) - self.assertItemsEqual([], MACAddress.objects.filter(mac_address=mac)) - - def test_MACAddressForm_displays_error_message_if_mac_already_used(self): - mac = factory.getRandomMACAddress() - node = factory.make_mac_address(address=mac) - node = factory.make_node() - form = MACAddressForm(node=node, data={'mac_address': mac}) - self.assertFalse(form.is_valid()) - self.assertEquals( - {'mac_address': ['This MAC address is already registered.']}, - form._errors) - self.assertFalse( - MACAddress.objects.filter(node=node, mac_address=mac).exists()) - - -def make_interface_settings(network=None, management=None): - """Create a dict of arbitrary interface configuration parameters.""" - if network is None: - network = factory.getRandomNetwork() - if management is None: - management = factory.getRandomEnum(NODEGROUPINTERFACE_MANAGEMENT) - # Pick upper and lower boundaries of IP range, with upper > lower. 
- ip_range_low, ip_range_high = factory.make_ip_range(network) - return { - 'ip': factory.getRandomIPInNetwork(network), - 'interface': factory.make_name('interface'), - 'subnet_mask': unicode(network.netmask), - 'broadcast_ip': unicode(network.broadcast), - 'router_ip': factory.getRandomIPInNetwork(network), - 'ip_range_low': unicode(ip_range_low), - 'ip_range_high': unicode(ip_range_high), - 'management': management, - } - - -nullable_fields = [ - 'subnet_mask', 'broadcast_ip', 'router_ip', 'ip_range_low', - 'ip_range_high'] - - -class TestNodeGroupInterfaceForm(MAASServerTestCase): - - def test_NodeGroupInterfaceForm_validates_parameters(self): - form = NodeGroupInterfaceForm(data={'ip': factory.getRandomString()}) - self.assertFalse(form.is_valid()) - self.assertEquals( - {'ip': ['Enter a valid IPv4 or IPv6 address.']}, form._errors) - - def test_NodeGroupInterfaceForm_can_save_fields_being_None(self): - int_settings = make_interface_settings() - int_settings['management'] = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED - for field_name in nullable_fields: - del int_settings[field_name] - nodegroup = factory.make_node_group() - form = NodeGroupInterfaceForm( - data=int_settings, - instance=NodeGroupInterface(nodegroup=nodegroup)) - interface = form.save() - field_values = [ - getattr(interface, field_name) for field_name in nullable_fields] - self.assertThat(field_values, AllMatch(Equals(''))) - - -class TestNodeGroupInterfaceForeignDHCPForm(MAASServerTestCase): - - def test_forms_saves_foreign_dhcp_ip(self): - nodegroup = factory.make_node_group() - [interface] = nodegroup.get_managed_interfaces() - foreign_dhcp_ip = factory.getRandomIPAddress() - form = NodeGroupInterfaceForeignDHCPForm( - data={'foreign_dhcp_ip': foreign_dhcp_ip}, - instance=interface) - self.assertTrue(form.is_valid()) - form.save() - self.assertEqual( - foreign_dhcp_ip, reload_object(interface).foreign_dhcp_ip) - - def test_forms_validates_foreign_dhcp_ip(self): - nodegroup = 
factory.make_node_group() - [interface] = nodegroup.get_managed_interfaces() - form = NodeGroupInterfaceForeignDHCPForm( - data={'foreign_dhcp_ip': 'invalid-ip'}, instance=interface) - self.assertFalse(form.is_valid()) - - def test_report_foreign_dhcp_does_not_trigger_update_signal(self): - self.patch(settings, "DHCP_CONNECT", False) - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) - [interface] = nodegroup.get_managed_interfaces() - - self.patch(settings, "DHCP_CONNECT", True) - self.patch(tasks, 'write_dhcp_config') - - foreign_dhcp_ip = factory.getRandomIPAddress() - form = NodeGroupInterfaceForeignDHCPForm( - data={'foreign_dhcp_ip': foreign_dhcp_ip}, - instance=interface) - - self.assertTrue(form.is_valid()) - form.save() - self.assertEqual( - foreign_dhcp_ip, reload_object(interface).foreign_dhcp_ip) - tasks.write_dhcp_config.apply_async.assert_has_calls([]) - - -class TestValidateNonoverlappingNetworks(TestCase): - """Tests for `validate_nonoverlapping_networks`.""" - - def make_interface_definition(self, ip, netmask, name=None): - """Return a minimal imitation of an interface definition.""" - if name is None: - name = factory.make_name('itf') - return { - 'interface': name, - 'ip': ip, - 'subnet_mask': netmask, - 'management': NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, - } - - def test_accepts_zero_interfaces(self): - validate_nonoverlapping_networks([]) - # Success is getting here without error. - pass - - def test_accepts_single_interface(self): - validate_nonoverlapping_networks( - [self.make_interface_definition('10.1.1.1', '255.255.0.0')]) - # Success is getting here without error. - pass - - def test_accepts_disparate_ranges(self): - validate_nonoverlapping_networks([ - self.make_interface_definition('10.1.0.0', '255.255.0.0'), - self.make_interface_definition('192.168.0.0', '255.255.255.0'), - ]) - # Success is getting here without error. 
- pass - - def test_accepts_near_neighbours(self): - validate_nonoverlapping_networks([ - self.make_interface_definition('10.1.0.0', '255.255.0.0'), - self.make_interface_definition('10.2.0.0', '255.255.0.0'), - ]) - # Success is getting here without error. - pass - - def test_rejects_identical_ranges(self): - definitions = [ - self.make_interface_definition('192.168.0.0', '255.255.255.0'), - self.make_interface_definition('192.168.0.0', '255.255.255.0'), - ] - error = self.assertRaises( - ValidationError, - validate_nonoverlapping_networks, definitions) - error_text = error.messages[0] - self.assertThat( - error_text, MatchesRegex( - "Conflicting networks on [^\\s]+ and [^\\s]+: " - "address ranges overlap.")) - self.assertThat( - error_text, - MatchesAll( - *( - Contains(definition['interface']) - for definition in definitions - ))) - - def test_rejects_nested_ranges(self): - definitions = [ - self.make_interface_definition('192.168.0.0', '255.255.0.0'), - self.make_interface_definition('192.168.100.0', '255.255.255.0'), - ] - error = self.assertRaises( - ValidationError, - validate_nonoverlapping_networks, definitions) - self.assertIn("Conflicting networks", unicode(error)) - - def test_detects_conflict_regardless_of_order(self): - definitions = [ - self.make_interface_definition('192.168.100.0', '255.255.255.0'), - self.make_interface_definition('192.168.1.0', '255.255.255.0'), - self.make_interface_definition('192.168.64.0', '255.255.192.0'), - ] - error = self.assertRaises( - ValidationError, - validate_nonoverlapping_networks, definitions) - self.assertThat(error.messages[0], StartsWith("Conflicting networks")) - - -class TestNodeGroupWithInterfacesForm(MAASServerTestCase): - - def test_creates_pending_nodegroup(self): - name = factory.make_name('name') - uuid = factory.getRandomUUID() - form = NodeGroupWithInterfacesForm( - data={'name': name, 'uuid': uuid}) - self.assertTrue(form.is_valid(), form._errors) - nodegroup = form.save() - self.assertEqual( - 
(uuid, name, NODEGROUP_STATUS.PENDING, 0), - ( - nodegroup.uuid, - nodegroup.name, - nodegroup.status, - nodegroup.nodegroupinterface_set.count(), - )) - - def test_creates_nodegroup_with_status(self): - name = factory.make_name('name') - uuid = factory.getRandomUUID() - form = NodeGroupWithInterfacesForm( - status=NODEGROUP_STATUS.ACCEPTED, - data={'name': name, 'uuid': uuid}) - self.assertTrue(form.is_valid(), form._errors) - nodegroup = form.save() - self.assertEqual(NODEGROUP_STATUS.ACCEPTED, nodegroup.status) - - def test_validates_parameters(self): - name = factory.make_name('name') - too_long_uuid = 'test' * 30 - form = NodeGroupWithInterfacesForm( - data={'name': name, 'uuid': too_long_uuid}) - self.assertFalse(form.is_valid()) - self.assertEquals( - {'uuid': - ['Ensure this value has at most 36 characters (it has 120).']}, - form._errors) - - def test_rejects_invalid_json_interfaces(self): - name = factory.make_name('name') - uuid = factory.getRandomUUID() - invalid_interfaces = factory.make_name('invalid_json_interfaces') - form = NodeGroupWithInterfacesForm( - data={ - 'name': name, 'uuid': uuid, 'interfaces': invalid_interfaces}) - self.assertFalse(form.is_valid()) - self.assertEquals( - {'interfaces': ['Invalid json value.']}, - form._errors) - - def test_rejects_invalid_list_interfaces(self): - name = factory.make_name('name') - uuid = factory.getRandomUUID() - invalid_interfaces = json.dumps('invalid interface list') - form = NodeGroupWithInterfacesForm( - data={ - 'name': name, 'uuid': uuid, 'interfaces': invalid_interfaces}) - self.assertFalse(form.is_valid()) - self.assertEquals( - {'interfaces': [INTERFACES_VALIDATION_ERROR_MESSAGE]}, - form._errors) - - def test_rejects_invalid_interface(self): - name = factory.make_name('name') - uuid = factory.getRandomUUID() - interface = make_interface_settings() - # Make the interface invalid. 
- interface['ip_range_high'] = 'invalid IP address' - interfaces = json.dumps([interface]) - form = NodeGroupWithInterfacesForm( - data={'name': name, 'uuid': uuid, 'interfaces': interfaces}) - self.assertFalse(form.is_valid()) - self.assertIn( - "Enter a valid IPv4 or IPv6 address", - form._errors['interfaces'][0]) - - def test_creates_interface_from_params(self): - name = factory.make_name('name') - uuid = factory.getRandomUUID() - interface = make_interface_settings() - interfaces = json.dumps([interface]) - form = NodeGroupWithInterfacesForm( - data={'name': name, 'uuid': uuid, 'interfaces': interfaces}) - self.assertTrue(form.is_valid(), form._errors) - form.save() - nodegroup = NodeGroup.objects.get(uuid=uuid) - self.assertThat( - nodegroup.nodegroupinterface_set.all()[0], - MatchesStructure.byEquality(**interface)) - - def test_checks_against_conflicting_managed_networks(self): - big_network = IPNetwork('10.0.0.0/255.255.0.0') - nested_network = IPNetwork('10.0.100.0/255.255.255.0') - managed = NODEGROUPINTERFACE_MANAGEMENT.DHCP - form = NodeGroupWithInterfacesForm( - data={ - 'name': factory.make_name('cluster'), - 'uuid': factory.getRandomUUID(), - 'interfaces': json.dumps([ - make_interface_settings( - network=big_network, management=managed), - make_interface_settings( - network=nested_network, management=managed), - ]), - }) - self.assertFalse(form.is_valid()) - self.assertNotEqual([], form._errors['interfaces']) - self.assertThat( - form._errors['interfaces'][0], - StartsWith("Conflicting networks")) - - def test_ignores_conflicts_on_unmanaged_interfaces(self): - big_network = IPNetwork('10.0.0.0/255.255.0.0') - nested_network = IPNetwork('10.100.100.0/255.255.255.0') - managed = NODEGROUPINTERFACE_MANAGEMENT.DHCP - unmanaged = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED - form = NodeGroupWithInterfacesForm( - data={ - 'name': factory.make_name('cluster'), - 'uuid': factory.getRandomUUID(), - 'interfaces': json.dumps([ - make_interface_settings( - 
network=big_network, management=managed), - make_interface_settings( - network=nested_network, management=unmanaged), - ]), - }) - is_valid = form.is_valid() - self.assertEqual( - (True, None), - (is_valid, form._errors.get('interfaces'))) - - def test_creates_multiple_interfaces(self): - name = factory.make_name('name') - uuid = factory.getRandomUUID() - interfaces = [ - make_interface_settings(management=management) - for management in map_enum(NODEGROUPINTERFACE_MANAGEMENT).values() - ] - form = NodeGroupWithInterfacesForm( - data={ - 'name': name, - 'uuid': uuid, - 'interfaces': json.dumps(interfaces), - }) - self.assertTrue(form.is_valid(), form._errors) - form.save() - nodegroup = NodeGroup.objects.get(uuid=uuid) - self.assertEqual( - len(interfaces), nodegroup.nodegroupinterface_set.count()) - - def test_populates_cluster_name_default(self): - name = factory.make_name('name') - uuid = factory.getRandomUUID() - form = NodeGroupWithInterfacesForm( - status=NODEGROUP_STATUS.ACCEPTED, - data={'name': name, 'uuid': uuid}) - self.assertTrue(form.is_valid(), form._errors) - nodegroup = form.save() - self.assertIn(uuid, nodegroup.cluster_name) - - def test_populates_cluster_name(self): - cluster_name = factory.make_name('cluster_name') - uuid = factory.getRandomUUID() - form = NodeGroupWithInterfacesForm( - status=NODEGROUP_STATUS.ACCEPTED, - data={'cluster_name': cluster_name, 'uuid': uuid}) - self.assertTrue(form.is_valid(), form._errors) - nodegroup = form.save() - self.assertEqual(cluster_name, nodegroup.cluster_name) - - def test_creates_unmanaged_interfaces(self): - name = factory.make_name('name') - uuid = factory.getRandomUUID() - interface = make_interface_settings() - del interface['management'] - interfaces = json.dumps([interface]) - form = NodeGroupWithInterfacesForm( - data={'name': name, 'uuid': uuid, 'interfaces': interfaces}) - self.assertTrue(form.is_valid(), form._errors) - form.save() - uuid_nodegroup = NodeGroup.objects.get(uuid=uuid) - 
self.assertEqual( - [NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED], - [ - nodegroup.management for nodegroup in - uuid_nodegroup.nodegroupinterface_set.all() - ]) - - -class TestNodeGroupEdit(MAASServerTestCase): - - def make_form_data(self, nodegroup): - """Create `NodeGroupEdit` form data based on `nodegroup`.""" - return { - 'name': nodegroup.name, - 'cluster_name': nodegroup.cluster_name, - 'status': nodegroup.status, - } - - def test_changes_name(self): - nodegroup = factory.make_node_group(name=factory.make_name('old-name')) - new_name = factory.make_name('new-name') - data = self.make_form_data(nodegroup) - data['name'] = new_name - form = NodeGroupEdit(instance=nodegroup, data=data) - self.assertTrue(form.is_valid()) - form.save() - self.assertEqual(new_name, reload_object(nodegroup).name) - - def test_refuses_name_change_if_dns_managed_and_nodes_in_use(self): - nodegroup, node = factory.make_unrenamable_nodegroup_with_node() - data = self.make_form_data(nodegroup) - data['name'] = factory.make_name('new-name') - form = NodeGroupEdit(instance=nodegroup, data=data) - self.assertFalse(form.is_valid()) - - def test_accepts_unchanged_name(self): - nodegroup, node = factory.make_unrenamable_nodegroup_with_node() - original_name = nodegroup.name - form = NodeGroupEdit( - instance=nodegroup, data=self.make_form_data(nodegroup)) - self.assertTrue(form.is_valid()) - form.save() - self.assertEqual(original_name, reload_object(nodegroup).name) - - def test_accepts_omitted_name(self): - nodegroup, node = factory.make_unrenamable_nodegroup_with_node() - original_name = nodegroup.name - data = self.make_form_data(nodegroup) - del data['name'] - form = NodeGroupEdit(instance=nodegroup, data=data) - self.assertTrue(form.is_valid()) - form.save() - self.assertEqual(original_name, reload_object(nodegroup).name) - - def test_accepts_name_change_if_nodegroup_not_accepted(self): - nodegroup, node = factory.make_unrenamable_nodegroup_with_node() - nodegroup.status = 
NODEGROUP_STATUS.PENDING - data = self.make_form_data(nodegroup) - data['name'] = factory.make_name('new-name') - form = NodeGroupEdit(instance=nodegroup, data=data) - self.assertTrue(form.is_valid()) - - def test_accepts_name_change_if_dns_managed_but_no_nodes_in_use(self): - nodegroup, node = factory.make_unrenamable_nodegroup_with_node() - node.status = NODE_STATUS.READY - node.save() - data = self.make_form_data(nodegroup) - data['name'] = factory.make_name('new-name') - form = NodeGroupEdit(instance=nodegroup, data=data) - self.assertTrue(form.is_valid()) - form.save() - self.assertEqual(data['name'], reload_object(nodegroup).name) - - def test_accepts_name_change_if_nodes_in_use_but_dns_not_managed(self): - nodegroup, node = factory.make_unrenamable_nodegroup_with_node() - [interface] = nodegroup.get_managed_interfaces() - interface.management = NODEGROUPINTERFACE_MANAGEMENT.DHCP - interface.save() - data = self.make_form_data(nodegroup) - data['name'] = factory.make_name('new-name') - form = NodeGroupEdit(instance=nodegroup, data=data) - self.assertTrue(form.is_valid()) - form.save() - self.assertEqual(data['name'], reload_object(nodegroup).name) - - def test_accepts_name_change_if_nodegroup_has_no_interface(self): - nodegroup, node = factory.make_unrenamable_nodegroup_with_node() - NodeGroupInterface.objects.filter(nodegroup=nodegroup).delete() - data = self.make_form_data(nodegroup) - data['name'] = factory.make_name('new-name') - form = NodeGroupEdit(instance=nodegroup, data=data) - self.assertTrue(form.is_valid()) - form.save() - self.assertEqual(data['name'], reload_object(nodegroup).name) - - -class TestCommissioningFormForm(MAASServerTestCase): - - def test_commissioningform_error_msg_lists_series_choices(self): - form = CommissioningForm() - field = form.fields['commissioning_distro_series'] - self.assertEqual( - compose_invalid_choice_text( - 'commissioning_distro_series', field.choices), - field.error_messages['invalid_choice']) - - -class 
TestCommissioningScriptForm(MAASServerTestCase): - - def test_creates_commissioning_script(self): - content = factory.getRandomString().encode('ascii') - name = factory.make_name('filename') - uploaded_file = SimpleUploadedFile(content=content, name=name) - form = CommissioningScriptForm(files={'content': uploaded_file}) - self.assertTrue(form.is_valid(), form._errors) - form.save() - new_script = CommissioningScript.objects.get(name=name) - self.assertThat( - new_script, - MatchesStructure.byEquality(name=name, content=content)) - - def test_raises_if_duplicated_name(self): - content = factory.getRandomString().encode('ascii') - name = factory.make_name('filename') - factory.make_commissioning_script(name=name) - uploaded_file = SimpleUploadedFile(content=content, name=name) - form = CommissioningScriptForm(files={'content': uploaded_file}) - self.assertEqual( - (False, {'content': ["A script with that name already exists."]}), - (form.is_valid(), form._errors)) - - def test_rejects_whitespace_in_name(self): - name = factory.make_name('with space') - content = factory.getRandomString().encode('ascii') - uploaded_file = SimpleUploadedFile(content=content, name=name) - form = CommissioningScriptForm(files={'content': uploaded_file}) - self.assertFalse(form.is_valid()) - self.assertEqual( - ["Name contains disallowed characters (e.g. space or quotes)."], - form._errors['content']) - - def test_rejects_quotes_in_name(self): - name = factory.make_name("l'horreur") - content = factory.getRandomString().encode('ascii') - uploaded_file = SimpleUploadedFile(content=content, name=name) - form = CommissioningScriptForm(files={'content': uploaded_file}) - self.assertFalse(form.is_valid()) - self.assertEqual( - ["Name contains disallowed characters (e.g. 
space or quotes)."], - form._errors['content']) - - -class TestUnconstrainedMultipleChoiceField(MAASServerTestCase): - - def test_accepts_list(self): - value = ['a', 'b'] - instance = UnconstrainedMultipleChoiceField() - self.assertEqual(value, instance.clean(value)) - - -class TestValidatorMultipleChoiceField(MAASServerTestCase): - - def test_field_validates_valid_data(self): - value = ['test@example.com', 'me@example.com'] - field = ValidatorMultipleChoiceField(validator=validate_email) - self.assertEqual(value, field.clean(value)) - - def test_field_uses_validator(self): - value = ['test@example.com', 'invalid-email'] - field = ValidatorMultipleChoiceField(validator=validate_email) - error = self.assertRaises(ValidationError, field.clean, value) - self.assertEquals(['Enter a valid email address.'], error.messages) - - -class TestBulkNodeActionForm(MAASServerTestCase): - - def test_performs_action(self): - node1 = factory.make_node() - node2 = factory.make_node() - node3 = factory.make_node() - system_id_to_delete = [node1.system_id, node2.system_id] - form = BulkNodeActionForm( - user=factory.make_admin(), - data=dict( - action=Delete.name, - system_id=system_id_to_delete)) - self.assertTrue(form.is_valid(), form._errors) - done, not_actionable, not_permitted = form.save() - existing_nodes = list(Node.objects.filter( - system_id__in=system_id_to_delete)) - node3_system_id = reload_object(node3).system_id - self.assertEqual( - [2, 0, 0], - [done, not_actionable, not_permitted]) - self.assertEqual( - [[], node3.system_id], - [existing_nodes, node3_system_id]) - - def test_first_action_is_empty(self): - form = BulkNodeActionForm(user=factory.make_admin()) - action = form.fields['action'] - default_action = action.choices[0][0] - required = action.required - # The default action is the empty string (i.e. no action) - # and it's a required field. 
- self.assertEqual(('', True), (default_action, required)) - - def test_admin_is_offered_bulk_node_change(self): - form = BulkNodeActionForm(user=factory.make_admin()) - choices = form.fields['action'].choices - self.assertNotEqual( - [], - [choice for choice in choices if choice[0] == 'set_zone']) - - def test_nonadmin_is_not_offered_bulk_node_change(self): - form = BulkNodeActionForm(user=factory.make_user()) - choices = form.fields['action'].choices - self.assertEqual( - [], - [choice for choice in choices if choice[0] == 'set_zone']) - - def test_gives_stat_when_not_applicable(self): - node1 = factory.make_node(status=NODE_STATUS.DECLARED) - node2 = factory.make_node(status=NODE_STATUS.FAILED_TESTS) - system_id_for_action = [node1.system_id, node2.system_id] - form = BulkNodeActionForm( - user=factory.make_admin(), - data=dict( - action=StartNode.name, - system_id=system_id_for_action)) - self.assertTrue(form.is_valid(), form._errors) - done, not_actionable, not_permitted = form.save() - self.assertEqual( - [0, 2, 0], - [done, not_actionable, not_permitted]) - - def test_gives_stat_when_no_permission(self): - user = factory.make_user() - node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) - system_id_for_action = [node.system_id] - form = BulkNodeActionForm( - user=user, - data=dict( - action=StopNode.name, - system_id=system_id_for_action)) - self.assertTrue(form.is_valid(), form._errors) - done, not_actionable, not_permitted = form.save() - self.assertEqual( - [0, 0, 1], - [done, not_actionable, not_permitted]) - - def test_gives_stat_when_action_is_inhibited(self): - node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) - form = BulkNodeActionForm( - user=factory.make_admin(), - data=dict( - action=Delete.name, - system_id=[node.system_id])) - self.assertTrue(form.is_valid(), form._errors) - done, not_actionable, not_permitted = form.save() - self.assertEqual( - [0, 1, 0], - [done, 
not_actionable, not_permitted]) - - def test_rejects_empty_system_ids(self): - form = BulkNodeActionForm( - user=factory.make_admin(), - data=dict(action=Delete.name, system_id=[])) - self.assertFalse(form.is_valid(), form._errors) - self.assertEqual( - ["No node selected."], - form._errors['system_id']) - - def test_rejects_invalid_system_ids(self): - node = factory.make_node() - system_id_to_delete = [node.system_id, "wrong-system_id"] - form = BulkNodeActionForm( - user=factory.make_admin(), - data=dict( - action=Delete.name, - system_id=system_id_to_delete)) - self.assertFalse(form.is_valid(), form._errors) - self.assertEqual( - ["Some of the given system ids are invalid system ids."], - form._errors['system_id']) - - def test_rejects_if_no_action(self): - form = BulkNodeActionForm( - user=factory.make_admin(), - data=dict(system_id=[factory.make_node().system_id])) - self.assertFalse(form.is_valid(), form._errors) - - def test_rejects_if_invalid_action(self): - form = BulkNodeActionForm( - user=factory.make_admin(), - data=dict( - action="invalid-action", - system_id=[factory.make_node().system_id])) - self.assertFalse(form.is_valid(), form._errors) - - def test_set_zone_sets_zone_on_node(self): - node = factory.make_node() - zone = factory.make_zone() - form = BulkNodeActionForm( - user=factory.make_admin(), - data={ - 'action': 'set_zone', - 'zone': zone.name, - 'system_id': [node.system_id], - }) - self.assertTrue(form.is_valid(), form._errors) - self.assertEqual((1, 0, 0), form.save()) - node = reload_object(node) - self.assertEqual(zone, node.zone) - - def test_set_zone_does_not_work_if_not_admin(self): - node = factory.make_node() - form = BulkNodeActionForm( - user=factory.make_user(), - data={ - 'action': SetZoneBulkAction.name, - 'zone': factory.make_zone().name, - 'system_id': [node.system_id], - }) - self.assertFalse(form.is_valid()) - self.assertIn( - "Select a valid choice. 
" - "set_zone is not one of the available choices.", - form._errors['action']) - - def test_zone_field_rejects_empty_zone(self): - # If the field is present, the zone name has to be valid - # and the empty string is not a valid zone name. - form = BulkNodeActionForm( - user=factory.make_admin(), - data={ - 'action': SetZoneBulkAction.name, - 'zone': '', - }) - self.assertFalse(form.is_valid(), form._errors) - self.assertEqual( - ["This field is required."], - form._errors['zone']) - - def test_zone_field_present_if_data_is_empty(self): - form = BulkNodeActionForm( - user=factory.make_admin(), - data={}) - self.assertIn('zone', form.fields) - - def test_zone_field_not_present_action_is_not_SetZoneBulkAction(self): - form = BulkNodeActionForm( - user=factory.make_admin(), - data={'action': factory.make_name('action')}) - self.assertNotIn('zone', form.fields) - - def test_set_zone_leaves_unselected_nodes_alone(self): - unselected_node = factory.make_node() - original_zone = unselected_node.zone - form = BulkNodeActionForm( - user=factory.make_admin(), - data={ - 'action': SetZoneBulkAction.name, - 'zone': factory.make_zone().name, - 'system_id': [factory.make_node().system_id], - }) - self.assertTrue(form.is_valid(), form._errors) - self.assertEqual((1, 0, 0), form.save()) - unselected_node = reload_object(unselected_node) - self.assertEqual(original_zone, unselected_node.zone) - - -class TestDownloadProgressForm(MAASServerTestCase): - - def test_updates_instance(self): - progress = factory.make_download_progress_incomplete(size=None) - new_bytes_downloaded = progress.bytes_downloaded + 1 - size = progress.bytes_downloaded + 2 - error = factory.getRandomString() - - form = DownloadProgressForm( - data={ - 'size': size, - 'bytes_downloaded': new_bytes_downloaded, - 'error': error, - }, - instance=progress) - new_progress = form.save() - - progress = reload_object(progress) - self.assertEqual(progress, new_progress) - self.assertEqual(size, progress.size) - 
self.assertEqual(new_bytes_downloaded, progress.bytes_downloaded) - self.assertEqual(error, progress.error) - - def test_rejects_unknown_ongoing_download(self): - form = DownloadProgressForm( - data={'bytes_downloaded': 1}, instance=None) - - self.assertFalse(form.is_valid()) - - def test_get_download_returns_ongoing_download(self): - progress = factory.make_download_progress_incomplete() - - self.assertEqual( - progress, - DownloadProgressForm.get_download( - progress.nodegroup, progress.filename, - progress.bytes_downloaded + 1)) - - def test_get_download_recognises_start_of_new_download(self): - nodegroup = factory.make_node_group() - filename = factory.getRandomString() - progress = DownloadProgressForm.get_download(nodegroup, filename, None) - self.assertIsNotNone(progress) - self.assertEqual(nodegroup, progress.nodegroup) - self.assertEqual(filename, progress.filename) - self.assertIsNone(progress.bytes_downloaded) - - def test_get_download_returns_none_for_unknown_ongoing_download(self): - self.assertIsNone( - DownloadProgressForm.get_download( - factory.make_node_group(), factory.getRandomString(), 1)) - - -class TestZoneForm(MAASServerTestCase): - """Tests for `ZoneForm`.""" - - def test_creates_zone(self): - name = factory.make_name('zone') - description = factory.getRandomString() - form = ZoneForm(data={'name': name, 'description': description}) - form.save() - zone = Zone.objects.get(name=name) - self.assertIsNotNone(zone) - self.assertEqual(description, zone.description) - - def test_updates_zone(self): - zone = factory.make_zone() - new_description = factory.getRandomString() - form = ZoneForm(data={'description': new_description}, instance=zone) - form.save() - zone = reload_object(zone) - self.assertEqual(new_description, zone.description) - - def test_renames_zone(self): - zone = factory.make_zone() - new_name = factory.make_name('zone') - form = ZoneForm(data={'name': new_name}, instance=zone) - form.save() - zone = reload_object(zone) - 
self.assertEqual(new_name, zone.name) - self.assertEqual(zone, Zone.objects.get(name=new_name)) - - def test_update_default_zone_description_works(self): - zone = Zone.objects.get_default_zone() - new_description = factory.getRandomString() - form = ZoneForm(data={'description': new_description}, instance=zone) - self.assertTrue(form.is_valid(), form._errors) - form.save() - zone = reload_object(zone) - self.assertEqual(new_description, zone.description) - - def test_disallows_renaming_default_zone(self): - zone = Zone.objects.get_default_zone() - form = ZoneForm( - data={'name': factory.make_name('zone')}, - instance=zone) - self.assertFalse(form.is_valid()) - self.assertEqual( - {'name': ["This zone is the default zone, it cannot be renamed."]}, - form.errors) - - -class TestNetworkForm(MAASServerTestCase): - """Tests for `NetworkForm`.""" - - def test_creates_network(self): - network = factory.getRandomNetwork() - name = factory.make_name('network') - definition = { - 'name': name, - 'description': factory.getRandomString(), - 'ip': "%s" % network.cidr.ip, - 'netmask': "%s" % network.netmask, - 'vlan_tag': factory.make_vlan_tag(), - } - form = NetworkForm(data=definition) - form.save() - network_obj = Network.objects.get(name=name) - self.assertAttributes(network_obj, definition) - - def test_updates_network(self): - network = factory.make_network() - new_description = factory.getRandomString() - form = NetworkForm( - data={'description': new_description}, instance=network) - form.save() - network = reload_object(network) - self.assertEqual(new_description, network.description) - - def test_populates_initial_macaddresses(self): - network = factory.make_network() - macs = [ - factory.make_mac_address(networks=[network]) - for _ in range(3)] - # Create other MAC addresses. 
- for _ in range(2): - factory.make_mac_address(networks=[factory.make_network()]) - new_description = factory.getRandomString() - form = NetworkForm( - data={'description': new_description}, instance=network) - self.assertItemsEqual( - [mac.mac_address.get_raw() for mac in macs], - form.initial['mac_addresses']) - - def test_macaddresses_are_sorted(self): - network1, network2 = factory.make_networks(2) - macs = [ - factory.make_mac_address(networks=[network1]) - for _ in range(3)] - # Create macs connected to the same node. - macs = macs + [ - factory.make_mac_address(networks=[network1], node=macs[0].node) - for _ in range(3)] - # Create other MAC addresses. - for _ in range(2): - factory.make_mac_address(networks=[network2]) - form = NetworkForm(data={}, instance=network1) - self.assertEqual( - list(MACAddress.objects.all().order_by( - 'node__hostname', 'mac_address')), - list(form.fields['mac_addresses'].queryset)) - - def test_macaddresses_widget_displays_MAC_and_node_hostname(self): - network = factory.make_network() - [ - factory.make_mac_address(networks=[network]) - for _ in range(3)] - # Create other MAC addresses. - for _ in range(2): - factory.make_mac_address(networks=[factory.make_network()]) - form = NetworkForm(data={}, instance=network) - self.assertItemsEqual( - [(mac.mac_address, "%s (%s)" % ( - mac.mac_address, mac.node.hostname)) - for mac in MACAddress.objects.all()], - form.fields['mac_addresses'].widget.choices) - - def test_updates_macaddresses(self): - network = factory.make_network() - # Attach a couple of MAC addresses to the network. 
- [factory.make_mac_address(networks=[network]) for _ in range(3)] - new_macs = [ - factory.make_mac_address() - for _ in range(3)] - form = NetworkForm( - data={ - 'mac_addresses': [ - mac.mac_address.get_raw() for mac in new_macs], - }, - instance=network) - form.save() - network = reload_object(network) - self.assertItemsEqual(new_macs, network.macaddress_set.all()) - - def test_reports_clashes(self): - # The uniqueness test on the Network model raises a ValidationError - # when it finds a clash, but Django is prone to crashing when the - # exception doesn't take the expected form (bug 1299114). - big_network = IPNetwork('10.9.0.0/16') - nested_network = IPNetwork('10.9.9.0/24') - - existing_network = factory.make_network(network=big_network) - form = NetworkForm(data={ - 'name': factory.make_name('clashing-network'), - 'ip': "%s" % nested_network.cidr.ip, - 'netmask': "%s" % nested_network.netmask, - 'vlan_tag': factory.make_vlan_tag(), - }) - self.assertFalse(form.is_valid()) - message = "IP range clashes with network '%s'." % existing_network.name - self.assertEqual( - { - 'ip': [message], - 'netmask': [message], - }, - form.errors) - - -class TestInstanceListField(MAASServerTestCase): - """Tests for `InstanceListingField`.""" - - def test_field_validates_valid_data(self): - nodes = [factory.make_node() for i in range(3)] - # Create other nodes. - [factory.make_node() for i in range(3)] - field = InstanceListField(model_class=Node, field_name='system_id') - input_data = [node.system_id for node in nodes] - self.assertItemsEqual( - input_data, - [node.system_id for node in field.clean(input_data)]) - - def test_field_ignores_duplicates(self): - nodes = [factory.make_node() for i in range(2)] - # Create other nodes. 
- [factory.make_node() for i in range(3)] - field = InstanceListField(model_class=Node, field_name='system_id') - input_data = [node.system_id for node in nodes] * 2 - self.assertItemsEqual( - set(input_data), - [node.system_id for node in field.clean(input_data)]) - - def test_field_rejects_invalid_data(self): - nodes = [factory.make_node() for i in range(3)] - field = InstanceListField(model_class=Node, field_name='system_id') - error = self.assertRaises( - ValidationError, - field.clean, [node.system_id for node in nodes] + ['unknown']) - self.assertEquals(['Unknown node(s): unknown.'], error.messages) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_settings.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_settings.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_settings.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_settings.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test forms settings.""" @@ -14,6 +14,7 @@ __metaclass__ = type __all__ = [] + from django import forms from maasserver.forms_settings import ( CONFIG_ITEMS, @@ -30,7 +31,7 @@ class TestGetConfigField(MAASServerTestCase): def test_get_config_field_validates_config_name(self): - config_name = factory.getRandomString() + config_name = factory.make_string() self.assertRaises( forms.ValidationError, get_config_field, config_name) @@ -43,7 +44,7 @@ class TestGetConfigForm(MAASServerTestCase): def test_get_config_form_returns_initialized_form(self): - maas_name = factory.getRandomString() + maas_name = factory.make_string() Config.objects.set_config('maas_name', maas_name) form = get_config_form('maas_name') # The form contains only one field. 
@@ -70,3 +71,10 @@ compose_invalid_choice_text( 'commissioning_distro_series', field.choices), field.error_messages['invalid_choice']) + + def test_upstream_dns_accepts_ip_list(self): + field = get_config_field('upstream_dns') + ips1 = [factory.make_ip_address() for _ in range(3)] + ips2 = [factory.make_ip_address() for _ in range(3)] + input = ' '.join(ips1) + ' ' + ','.join(ips2) + self.assertEqual(' '.join(ips1 + ips2), field.clean(input)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_user.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_user.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_user.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_user.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,135 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for user-creation forms.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from django.contrib.auth.models import User +from maasserver.forms import ( + EditUserForm, + NewUserCreationForm, + ProfileForm, + ) +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase +from testtools.matchers import MatchesRegex + + +class TestUniqueEmailForms(MAASServerTestCase): + + def assertFormFailsValidationBecauseEmailNotUnique(self, form): + self.assertFalse(form.is_valid()) + self.assertIn('email', form._errors) + self.assertEquals(1, len(form._errors['email'])) + # Cope with 'Email' and 'E-mail' in error message. 
+ self.assertThat( + form._errors['email'][0], + MatchesRegex( + r'User with this E-{0,1}mail address already exists.')) + + def test_ProfileForm_fails_validation_if_email_taken(self): + another_email = '%s@example.com' % factory.make_string() + factory.make_User(email=another_email) + email = '%s@example.com' % factory.make_string() + user = factory.make_User(email=email) + form = ProfileForm(instance=user, data={'email': another_email}) + self.assertFormFailsValidationBecauseEmailNotUnique(form) + + def test_ProfileForm_validates_if_email_unchanged(self): + email = '%s@example.com' % factory.make_string() + user = factory.make_User(email=email) + form = ProfileForm(instance=user, data={'email': email}) + self.assertTrue(form.is_valid()) + + def test_NewUserCreationForm_fails_validation_if_email_taken(self): + email = '%s@example.com' % factory.make_string() + username = factory.make_string() + password = factory.make_string() + factory.make_User(email=email) + form = NewUserCreationForm( + { + 'email': email, + 'username': username, + 'password1': password, + 'password2': password, + }) + self.assertFormFailsValidationBecauseEmailNotUnique(form) + + def test_EditUserForm_fails_validation_if_email_taken(self): + another_email = '%s@example.com' % factory.make_string() + factory.make_User(email=another_email) + email = '%s@example.com' % factory.make_string() + user = factory.make_User(email=email) + form = EditUserForm(instance=user, data={'email': another_email}) + self.assertFormFailsValidationBecauseEmailNotUnique(form) + + def test_EditUserForm_validates_if_email_unchanged(self): + email = '%s@example.com' % factory.make_string() + user = factory.make_User(email=email) + form = EditUserForm( + instance=user, + data={ + 'email': email, + 'username': factory.make_string(), + }) + self.assertTrue(form.is_valid()) + + +class TestNewUserCreationForm(MAASServerTestCase): + + def test_saves_to_db_by_default(self): + password = factory.make_name('password') + params = 
{ + 'email': '%s@example.com' % factory.make_string(), + 'username': factory.make_name('user'), + 'password1': password, + 'password2': password, + } + form = NewUserCreationForm(params) + form.save() + self.assertIsNotNone(User.objects.get(username=params['username'])) + + def test_email_is_required(self): + password = factory.make_name('password') + params = { + 'email': '', + 'username': factory.make_name('user'), + 'password1': password, + 'password2': password, + } + form = NewUserCreationForm(params) + self.assertFalse(form.is_valid()) + self.assertEquals( + {'email': ['This field is required.']}, + form._errors) + + def test_does_not_save_to_db_if_commit_is_False(self): + password = factory.make_name('password') + params = { + 'email': '%s@example.com' % factory.make_string(), + 'username': factory.make_name('user'), + 'password1': password, + 'password2': password, + } + form = NewUserCreationForm(params) + form.save(commit=False) + self.assertItemsEqual( + [], User.objects.filter(username=params['username'])) + + def test_fields_order(self): + form = NewUserCreationForm() + + self.assertEqual( + ['username', 'last_name', 'email', 'password1', 'password2', + 'is_superuser'], + list(form.fields)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_validate_new_static_ip_range.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_validate_new_static_ip_range.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_validate_new_static_ip_range.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_validate_new_static_ip_range.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,131 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for `validate_new_static_ip_ranges`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from django.core.exceptions import ValidationError +from maasserver.enum import ( + NODEGROUP_STATUS, + NODEGROUPINTERFACE_MANAGEMENT, + ) +from maasserver.forms import ( + ERROR_MESSAGE_STATIC_IPS_OUTSIDE_RANGE, + ERROR_MESSAGE_STATIC_RANGE_IN_USE, + validate_new_static_ip_ranges, + ) +from maasserver.models.staticipaddress import StaticIPAddress +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase +from netaddr import IPNetwork + + +class TestValidateNewStaticIPRanges(MAASServerTestCase): + """Tests for `validate_new_static_ip_ranges`().""" + + def make_interface(self): + network = IPNetwork("10.1.0.0/24") + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, + network=network) + [interface] = nodegroup.get_managed_interfaces() + interface.ip_range_low = '10.1.0.1' + interface.ip_range_high = '10.1.0.10' + interface.static_ip_range_low = '10.1.0.50' + interface.static_ip_range_high = '10.1.0.60' + interface.save() + return interface + + def test_raises_error_when_allocated_ips_fall_outside_new_range(self): + interface = self.make_interface() + StaticIPAddress.objects.allocate_new('10.1.0.56', '10.1.0.60') + error = self.assertRaises( + ValidationError, + validate_new_static_ip_ranges, + instance=interface, + static_ip_range_low='10.1.0.50', + static_ip_range_high='10.1.0.55') + self.assertEqual( + ERROR_MESSAGE_STATIC_IPS_OUTSIDE_RANGE, + error.message) + + def test_removing_static_range_raises_error_if_ips_allocated(self): + interface = self.make_interface() + StaticIPAddress.objects.allocate_new('10.1.0.56', '10.1.0.60') + error = self.assertRaises( + ValidationError, + validate_new_static_ip_ranges, + instance=interface, + 
static_ip_range_low='', + static_ip_range_high='') + self.assertEqual( + ERROR_MESSAGE_STATIC_RANGE_IN_USE, + error.message) + + def test_allows_range_expansion(self): + interface = self.make_interface() + StaticIPAddress.objects.allocate_new('10.1.0.56', '10.1.0.60') + is_valid = validate_new_static_ip_ranges( + interface, static_ip_range_low='10.1.0.40', + static_ip_range_high='10.1.0.100') + self.assertTrue(is_valid) + + def test_allows_allocated_ip_as_upper_bound(self): + interface = self.make_interface() + StaticIPAddress.objects.allocate_new('10.1.0.55', '10.1.0.55') + is_valid = validate_new_static_ip_ranges( + interface, + static_ip_range_low=interface.static_ip_range_low, + static_ip_range_high='10.1.0.55') + self.assertTrue(is_valid) + + def test_allows_allocated_ip_as_lower_bound(self): + interface = self.make_interface() + StaticIPAddress.objects.allocate_new('10.1.0.55', '10.1.0.55') + is_valid = validate_new_static_ip_ranges( + interface, static_ip_range_low='10.1.0.55', + static_ip_range_high=interface.static_ip_range_high) + self.assertTrue(is_valid) + + def test_ignores_unmanaged_interfaces(self): + interface = self.make_interface() + interface.management = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED + interface.save() + StaticIPAddress.objects.allocate_new( + interface.static_ip_range_low, interface.static_ip_range_high) + is_valid = validate_new_static_ip_ranges( + interface, static_ip_range_low='10.1.0.57', + static_ip_range_high='10.1.0.58') + self.assertTrue(is_valid) + + def test_ignores_interfaces_with_no_static_range(self): + interface = self.make_interface() + interface.static_ip_range_low = None + interface.static_ip_range_high = None + interface.save() + StaticIPAddress.objects.allocate_new('10.1.0.56', '10.1.0.60') + is_valid = validate_new_static_ip_ranges( + interface, static_ip_range_low='10.1.0.57', + static_ip_range_high='10.1.0.58') + self.assertTrue(is_valid) + + def test_ignores_unchanged_static_range(self): + interface = 
self.make_interface() + StaticIPAddress.objects.allocate_new( + interface.static_ip_range_low, interface.static_ip_range_high) + is_valid = validate_new_static_ip_ranges( + interface, + static_ip_range_low=interface.static_ip_range_low, + static_ip_range_high=interface.static_ip_range_high) + self.assertTrue(is_valid) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_validate_nonoverlapping_networks.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_validate_nonoverlapping_networks.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_validate_nonoverlapping_networks.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_validate_nonoverlapping_networks.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,111 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `validate_nonoverlapping_networks`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from django.core.exceptions import ValidationError +from maasserver.enum import NODEGROUPINTERFACE_MANAGEMENT +from maasserver.forms import validate_nonoverlapping_networks +from maastesting.factory import factory +from testtools import TestCase +from testtools.matchers import ( + Contains, + MatchesAll, + MatchesRegex, + StartsWith, + ) + + +class TestValidateNonoverlappingNetworks(TestCase): + """Tests for `validate_nonoverlapping_networks`.""" + + def make_interface_definition(self, ip, netmask, name=None): + """Return a minimal imitation of an interface definition.""" + if name is None: + name = factory.make_name('itf') + return { + 'interface': name, + 'ip': ip, + 'subnet_mask': netmask, + 'management': NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, + } + + def test_accepts_zero_interfaces(self): + validate_nonoverlapping_networks([]) + # Success is getting here without 
error. + pass + + def test_accepts_single_interface(self): + validate_nonoverlapping_networks( + [self.make_interface_definition('10.1.1.1', '255.255.0.0')]) + # Success is getting here without error. + pass + + def test_accepts_disparate_ranges(self): + validate_nonoverlapping_networks([ + self.make_interface_definition('10.1.0.0', '255.255.0.0'), + self.make_interface_definition('192.168.0.0', '255.255.255.0'), + ]) + # Success is getting here without error. + pass + + def test_accepts_near_neighbours(self): + validate_nonoverlapping_networks([ + self.make_interface_definition('10.1.0.0', '255.255.0.0'), + self.make_interface_definition('10.2.0.0', '255.255.0.0'), + ]) + # Success is getting here without error. + pass + + def test_rejects_identical_ranges(self): + definitions = [ + self.make_interface_definition('192.168.0.0', '255.255.255.0'), + self.make_interface_definition('192.168.0.0', '255.255.255.0'), + ] + error = self.assertRaises( + ValidationError, + validate_nonoverlapping_networks, definitions) + error_text = error.messages[0] + self.assertThat( + error_text, MatchesRegex( + "Conflicting networks on [^\\s]+ and [^\\s]+: " + "address ranges overlap.")) + self.assertThat( + error_text, + MatchesAll( + *( + Contains(definition['interface']) + for definition in definitions + ))) + + def test_rejects_nested_ranges(self): + definitions = [ + self.make_interface_definition('192.168.0.0', '255.255.0.0'), + self.make_interface_definition('192.168.100.0', '255.255.255.0'), + ] + error = self.assertRaises( + ValidationError, + validate_nonoverlapping_networks, definitions) + self.assertIn("Conflicting networks", unicode(error)) + + def test_detects_conflict_regardless_of_order(self): + definitions = [ + self.make_interface_definition('192.168.100.0', '255.255.255.0'), + self.make_interface_definition('192.168.1.0', '255.255.255.0'), + self.make_interface_definition('192.168.64.0', '255.255.192.0'), + ] + error = self.assertRaises( + ValidationError, + 
validate_nonoverlapping_networks, definitions) + self.assertThat(error.messages[0], StartsWith("Conflicting networks")) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_zone.py maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_zone.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_forms_zone.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_forms_zone.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,70 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `ZoneForm`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maasserver.forms import ZoneForm +from maasserver.models import Zone +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.testcase import MAASServerTestCase + + +class TestZoneForm(MAASServerTestCase): + """Tests for `ZoneForm`.""" + + def test_creates_zone(self): + name = factory.make_name('zone') + description = factory.make_string() + form = ZoneForm(data={'name': name, 'description': description}) + form.save() + zone = Zone.objects.get(name=name) + self.assertIsNotNone(zone) + self.assertEqual(description, zone.description) + + def test_updates_zone(self): + zone = factory.make_Zone() + new_description = factory.make_string() + form = ZoneForm(data={'description': new_description}, instance=zone) + form.save() + zone = reload_object(zone) + self.assertEqual(new_description, zone.description) + + def test_renames_zone(self): + zone = factory.make_Zone() + new_name = factory.make_name('zone') + form = ZoneForm(data={'name': new_name}, instance=zone) + form.save() + zone = reload_object(zone) + self.assertEqual(new_name, zone.name) + self.assertEqual(zone, Zone.objects.get(name=new_name)) + + def 
test_update_default_zone_description_works(self): + zone = Zone.objects.get_default_zone() + new_description = factory.make_string() + form = ZoneForm(data={'description': new_description}, instance=zone) + self.assertTrue(form.is_valid(), form._errors) + form.save() + zone = reload_object(zone) + self.assertEqual(new_description, zone.description) + + def test_disallows_renaming_default_zone(self): + zone = Zone.objects.get_default_zone() + form = ZoneForm( + data={'name': factory.make_name('zone')}, + instance=zone) + self.assertFalse(form.is_valid()) + self.assertEqual( + {'name': ["This zone is the default zone, it cannot be renamed."]}, + form.errors) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_js.py maas-1.7.6+bzr3376/src/maasserver/tests/test_js.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_js.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_js.py 2015-07-10 01:27:14.000000000 +0000 @@ -27,6 +27,7 @@ relpath, ) import sys +from unittest import SkipTest from urlparse import urljoin from maastesting import ( @@ -168,6 +169,12 @@ class YUIUnitTestsLocal(YUIUnitTestsBase, MAASTestCase): + @classmethod + def setUpClass(cls): + raise SkipTest( + "XXX: Gavin Panella 2015-02-26 bug=1426010: " + "All tests using Selenium are breaking.") + scenarios = tuple( (relpath(path, root), {"test_url": "file://%s" % abspath(path)}) for path in YUIUnitTestsBase.test_paths) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_messages.py maas-1.7.6+bzr3376/src/maasserver/tests/test_messages.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_messages.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_messages.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,175 +0,0 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Test maasserver messages.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import json -import socket - -from maasserver.exceptions import NoRabbit -from maasserver.messages import ( - MAASMessenger, - MESSENGER_EVENT, - MessengerBase, - ) -from maasserver.models import Node -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase -from maasserver.tests.models import MessagesTestModel -from maastesting.djangotestcase import TestModelMixin - - -class FakeProducer: - """A fake RabbitProducer that simply records published messages.""" - - def __init__(self): - self.messages = [] - - def publish(self, message): - self.messages.append(message) - - -class TestMessenger(MessengerBase): - - def create_msg(self, event_name, instance): - return [event_name, instance] - - -class MessengerBaseTest(TestModelMixin, MAASServerTestCase): - - app = 'maasserver.tests' - - def test_update_obj_publishes_message_if_created(self): - producer = FakeProducer() - messenger = TestMessenger(MessagesTestModel, producer) - instance = factory.getRandomString() - messenger.update_obj(MessagesTestModel, instance, True) - self.assertEqual( - [[MESSENGER_EVENT.CREATED, instance]], producer.messages) - - def test_update_obj_publishes_message_if_not_created(self): - producer = FakeProducer() - messenger = TestMessenger(MessagesTestModel, producer) - instance = factory.getRandomString() - messenger.update_obj(MessagesTestModel, instance, False) - self.assertEqual( - [[MESSENGER_EVENT.UPDATED, instance]], producer.messages) - - def test_delete_obj_publishes_message(self): - producer = FakeProducer() - messenger = TestMessenger(MessagesTestModel, producer) - instance = factory.getRandomString() - messenger.delete_obj(MessagesTestModel, instance) - self.assertEqual( - [[MESSENGER_EVENT.DELETED, instance]], producer.messages) - - def 
test_register_registers_update_signal(self): - producer = FakeProducer() - messenger = TestMessenger(MessagesTestModel, producer) - obj = MessagesTestModel(name=factory.getRandomString()) - obj.save() - messenger.register() - obj.save() - self.assertEqual( - [[MESSENGER_EVENT.UPDATED, obj]], producer.messages) - - def test_register_registers_created_signal(self): - producer = FakeProducer() - messenger = TestMessenger(MessagesTestModel, producer) - messenger.register() - obj = MessagesTestModel(name=factory.getRandomString()) - obj.save() - self.assertEqual( - [[MESSENGER_EVENT.CREATED, obj]], producer.messages) - - def test_register_registers_delete_signal(self): - obj = MessagesTestModel(name=factory.getRandomString()) - obj.save() - producer = FakeProducer() - messenger = TestMessenger(MessagesTestModel, producer) - messenger.register() - obj.delete() - self.assertEqual( - [[MESSENGER_EVENT.DELETED, obj]], producer.messages) - - def test_publish_message_publishes_message(self): - event = factory.getRandomString() - instance = {factory.getRandomString(): factory.getRandomString()} - messenger = TestMessenger(MessagesTestModel, FakeProducer()) - messenger.publish_message(messenger.create_msg(event, instance)) - self.assertEqual([[event, instance]], messenger.producer.messages) - - def test_publish_message_swallows_missing_rabbit(self): - event = factory.getRandomString() - instance = {factory.getRandomString(): factory.getRandomString()} - - def fail_for_lack_of_rabbit(*args, **kwargs): - raise NoRabbit("I'm pretending not to have a RabbitMQ.") - - messenger = TestMessenger(MessagesTestModel, FakeProducer()) - messenger.producer.publish = fail_for_lack_of_rabbit - - messenger.publish_message(messenger.create_msg(event, instance)) - self.assertEqual([], messenger.producer.messages) - - def test_publish_message_propagates_exceptions(self): - event = factory.getRandomString() - instance = {factory.getRandomString(): factory.getRandomString()} - - def 
fail_despite_having_a_rabbit(*args, **kwargs): - raise socket.error("I have a rabbit but I fail anyway.") - - messenger = TestMessenger(MessagesTestModel, FakeProducer()) - messenger.producer.publish = fail_despite_having_a_rabbit - - self.assertRaises( - socket.error, - messenger.publish_message, messenger.create_msg(event, instance)) - self.assertEqual([], messenger.producer.messages) - - -class MAASMessengerTest(TestModelMixin, MAASServerTestCase): - - app = 'maasserver.tests' - - def test_event_key(self): - producer = FakeProducer() - event_name = factory.getRandomString() - obj = MessagesTestModel(name=factory.getRandomString()) - messenger = MAASMessenger(MessagesTestModel, producer) - self.assertEqual( - '%s.%s' % ('MessagesTestModel', event_name), - messenger.event_key(event_name, obj)) - - def test_create_msg(self): - producer = FakeProducer() - messenger = MAASMessenger(Node, producer) - event_name = factory.getRandomString() - obj_name = factory.getRandomString() - obj = MessagesTestModel(name=obj_name) - obj.save() - msg = messenger.create_msg(event_name, obj) - decoded_msg = json.loads(msg) - self.assertItemsEqual(['instance', 'event_key'], list(decoded_msg)) - self.assertItemsEqual( - ['id', 'name'], list(decoded_msg['instance'])) - self.assertEqual( - obj_name, decoded_msg['instance']['name']) - - def test_msg_containing_node_representation(self): - node = factory.make_node() - messenger = MAASMessenger(Node, FakeProducer()) - msg = messenger.create_msg(factory.getRandomString(), node) - decoded_msg = json.loads(msg) - self.assertItemsEqual(['instance', 'event_key'], list(decoded_msg)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_middleware.py maas-1.7.6+bzr3376/src/maasserver/tests/test_middleware.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_middleware.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_middleware.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,19 +14,33 @@ __metaclass__ = type 
__all__ = [] +import datetime import httplib import json import logging +import random +from crochet import TimeoutError from django.contrib.messages import constants from django.core.exceptions import ( PermissionDenied, ValidationError, ) +from django.core.urlresolvers import reverse from django.http import HttpResponse from django.http.request import build_request_repr -from django.test.client import RequestFactory from fixtures import FakeLogger +from maasserver import middleware as middleware_module +from maasserver.clusterrpc.testing.boot_images import make_rpc_boot_image +from maasserver.clusterrpc.utils import get_error_message_for_exception +from maasserver.components import ( + get_persistent_error, + register_persistent_error, + ) +from maasserver.enum import ( + COMPONENT, + NODEGROUP_STATUS, + ) from maasserver.exceptions import ( ExternalComponentException, MAASAPIException, @@ -35,82 +49,79 @@ ) from maasserver.middleware import ( APIErrorsMiddleware, + APIRPCErrorsMiddleware, DebuggingLoggerMiddleware, ErrorsMiddleware, - ExceptionLoggerMiddleware, ExceptionMiddleware, + ExternalComponentsMiddleware, + RPCErrorsMiddleware, ) +from maasserver.models import nodegroup as nodegroup_module from maasserver.testing import extract_redirect from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase +from maastesting.matchers import ( + MockCalledOnceWith, + MockNotCalled, + ) from maastesting.utils import sample_binary_data +from provisioningserver.rpc.exceptions import ( + MultipleFailures, + NoConnectionsAvailable, + PowerActionAlreadyInProgress, + ) +from provisioningserver.utils.shell import ExternalProcessError +from provisioningserver.utils.text import normalise_whitespace from testtools.matchers import ( Contains, + Equals, Not, ) - - -class Messages: - """A class to record messages published by Django messaging - framework. 
- """ - - messages = [] - - def add(self, level, message, extras): - self.messages.append((level, message, extras)) - - -def fake_request(path, method='GET'): - """Create a fake request. - - :param path: The path to make the request to. - :param method: The method to use for the reques - ('GET' or 'POST'). - """ - rf = RequestFactory() - request = rf.get(path) - request.method = method - request._messages = Messages() - return request +from twisted.python.failure import Failure class ExceptionMiddlewareTest(MAASServerTestCase): def make_base_path(self): """Return a path to handle exceptions for.""" - return "/%s" % factory.getRandomString() + return "/%s" % factory.make_string() - def make_middleware(self, base_path): + def make_middleware(self, base_path, retry_after=None): """Create an ExceptionMiddleware for base_path.""" class TestingExceptionMiddleware(ExceptionMiddleware): path_regex = base_path - return TestingExceptionMiddleware() + testing_middleware = TestingExceptionMiddleware() + if retry_after is not None: + testing_middleware.RETRY_AFTER_SERVICE_UNAVAILABLE = retry_after + + return testing_middleware - def process_exception(self, exception): + def process_exception(self, exception, retry_after=None): """Run a given exception through a fake ExceptionMiddleware. :param exception: The exception to simulate. :type exception: Exception + :param retry_after: Value of the RETRY_AFTER_SERVICE_UNAVAILABLE to + use in the fake middleware. :return: The response as returned by the ExceptionMiddleware. :rtype: HttpResponse or None. 
""" base_path = self.make_base_path() - middleware = self.make_middleware(base_path) - request = fake_request(base_path) + middleware = self.make_middleware(base_path, retry_after) + request = factory.make_fake_request(base_path) return middleware.process_exception(request, exception) def test_ignores_paths_outside_path_regex(self): middleware = self.make_middleware(self.make_base_path()) - request = fake_request(self.make_base_path()) + request = factory.make_fake_request(self.make_base_path()) exception = MAASAPINotFound("Huh?") self.assertIsNone(middleware.process_exception(request, exception)) def test_unknown_exception_generates_internal_server_error(self): # An unknown exception generates an internal server error with the # exception message. - error_message = factory.getRandomString() + error_message = factory.make_string() response = self.process_exception(RuntimeError(error_message)) self.assertEqual( (httplib.INTERNAL_SERVER_ERROR, error_message), @@ -120,7 +131,7 @@ class MyException(MAASAPIException): api_error = httplib.UNAUTHORIZED - error_message = factory.getRandomString() + error_message = factory.make_string() exception = MyException(error_message) response = self.process_exception(exception) self.assertEqual( @@ -138,7 +149,7 @@ (response.status_code, response.content.decode('utf-8'))) def test_reports_ValidationError_as_Bad_Request(self): - error_message = factory.getRandomString() + error_message = factory.make_string() response = self.process_exception(ValidationError(error_message)) self.assertEqual( (httplib.BAD_REQUEST, error_message), @@ -152,49 +163,60 @@ self.assertIn('application/json', response['Content-Type']) def test_reports_PermissionDenied_as_Forbidden(self): - error_message = factory.getRandomString() + error_message = factory.make_string() response = self.process_exception(PermissionDenied(error_message)) self.assertEqual( (httplib.FORBIDDEN, error_message), (response.status_code, response.content)) + def 
test_api_500_error_is_logged(self): + logger = self.useFixture(FakeLogger('maasserver')) + error_text = factory.make_string() + self.process_exception(MAASAPIException(error_text)) + self.assertThat(logger.output, Contains(error_text)) + + def test_generic_500_error_is_logged(self): + logger = self.useFixture(FakeLogger('maasserver')) + error_text = factory.make_string() + self.process_exception(Exception(error_text)) + self.assertThat(logger.output, Contains(error_text)) + + def test_reports_ExternalProcessError_as_ServiceUnavailable(self): + error_text = factory.make_string() + exception = ExternalProcessError(1, ["cmd"], error_text) + retry_after = random.randint(0, 10) + response = self.process_exception(exception, retry_after) + self.expectThat( + response.status_code, Equals(httplib.SERVICE_UNAVAILABLE)) + self.expectThat(response.content, Equals(unicode(exception))) + self.expectThat(response['Retry-After'], Equals("%s" % retry_after)) + class APIErrorsMiddlewareTest(MAASServerTestCase): def test_handles_error_on_API(self): middleware = APIErrorsMiddleware() - non_api_request = fake_request("/api/1.0/hello") - error_message = factory.getRandomString() + api_request = factory.make_fake_request("/api/1.0/hello") + error_message = factory.make_string() exception = MAASAPINotFound(error_message) - response = middleware.process_exception(non_api_request, exception) + response = middleware.process_exception(api_request, exception) self.assertEqual( (httplib.NOT_FOUND, error_message), (response.status_code, response.content)) def test_ignores_error_outside_API(self): middleware = APIErrorsMiddleware() - non_api_request = fake_request("/middleware/api/hello") - exception = MAASAPINotFound(factory.getRandomString()) + non_api_request = factory.make_fake_request("/middleware/api/hello") + exception = MAASAPINotFound(factory.make_string()) self.assertIsNone( middleware.process_exception(non_api_request, exception)) -class 
ExceptionLoggerMiddlewareTest(MAASServerTestCase): - - def test_exception_logger_logs_error(self): - logger = self.useFixture(FakeLogger('maasserver')) - error_text = factory.getRandomString() - ExceptionLoggerMiddleware().process_exception( - fake_request('/middleware/api/hello'), - ValueError(error_text)) - self.assertThat(logger.output, Contains(error_text)) - - class DebuggingLoggerMiddlewareTest(MAASServerTestCase): def test_debugging_logger_does_not_log_request_if_info_level(self): logger = self.useFixture(FakeLogger('maasserver', logging.INFO)) - request = fake_request("/api/1.0/nodes/") + request = factory.make_fake_request("/api/1.0/nodes/") DebuggingLoggerMiddleware().process_request(request) self.assertThat( logger.output, @@ -202,7 +224,7 @@ def test_debugging_logger_does_not_log_response_if_info_level(self): logger = self.useFixture(FakeLogger('maasserver', logging.INFO)) - request = fake_request("/api/1.0/nodes/") + request = factory.make_fake_request("/api/1.0/nodes/") response = HttpResponse( content="test content", status=httplib.OK, @@ -213,14 +235,14 @@ def test_debugging_logger_logs_request(self): logger = self.useFixture(FakeLogger('maasserver', logging.DEBUG)) - request = fake_request("/api/1.0/nodes/") + request = factory.make_fake_request("/api/1.0/nodes/") request.content = "test content" DebuggingLoggerMiddleware().process_request(request) self.assertThat(logger.output, Contains(build_request_repr(request))) def test_debugging_logger_logs_response(self): logger = self.useFixture(FakeLogger('maasserver', logging.DEBUG)) - request = fake_request("foo") + request = factory.make_fake_request("foo") response = HttpResponse( content="test content", status=httplib.OK, @@ -231,7 +253,7 @@ def test_debugging_logger_logs_binary_response(self): logger = self.useFixture(FakeLogger('maasserver', logging.DEBUG)) - request = fake_request("foo") + request = factory.make_fake_request("foo") response = HttpResponse( content=sample_binary_data, 
status=httplib.OK, @@ -246,7 +268,7 @@ def test_error_middleware_ignores_GET_requests(self): self.client_log_in() - request = fake_request(factory.getRandomString(), 'GET') + request = factory.make_fake_request(factory.make_string(), 'GET') exception = MAASException() error_middleware = ErrorsMiddleware() response = error_middleware.process_exception(request, exception) @@ -254,7 +276,7 @@ def test_error_middleware_ignores_non_ExternalComponentException(self): self.client_log_in() - request = fake_request(factory.getRandomString(), 'GET') + request = factory.make_fake_request(factory.make_string(), 'GET') exception = ValueError() error_middleware = ErrorsMiddleware() response = error_middleware.process_exception(request, exception) @@ -262,9 +284,9 @@ def test_error_middleware_handles_ExternalComponentException(self): self.client_log_in() - url = factory.getRandomString() - request = fake_request(url, 'POST') - error_message = factory.getRandomString() + url = factory.make_string() + request = factory.make_fake_request(url, 'POST') + error_message = factory.make_string() exception = ExternalComponentException(error_message) error_middleware = ErrorsMiddleware() response = error_middleware.process_exception(request, exception) @@ -273,3 +295,444 @@ # An error message has been published. self.assertEqual( [(constants.ERROR, error_message, '')], request._messages.messages) + + +class RPCErrorsMiddlewareTest(MAASServerTestCase): + + def test_handles_PowerActionAlreadyInProgress(self): + middleware = RPCErrorsMiddleware() + request = factory.make_fake_request(factory.make_string(), 'POST') + error_message = ( + "Unable to execute power action: another action is " + "already in progress for node %s" % factory.make_name('node')) + error = PowerActionAlreadyInProgress(error_message) + response = middleware.process_exception(request, error) + + # The response is a redirect. + self.assertEqual(request.path, extract_redirect(response)) + # An error message has been published. 
+        self.assertEqual(
+            [(constants.ERROR, "Error: %s" % error_message, '')],
+            request._messages.messages)
+
+    def test_handles_MultipleFailures(self):
+        middleware = RPCErrorsMiddleware()
+        request = factory.make_fake_request(factory.make_string(), 'POST')
+        failures = []
+        for _ in range(3):
+            error_message = factory.make_name("error-")
+            exception_class = random.choice(
+                (NoConnectionsAvailable, PowerActionAlreadyInProgress))
+            failures.append(Failure(exception_class(error_message)))
+        exception = MultipleFailures(*failures)
+        response = middleware.process_exception(request, exception)
+
+        # The response is a redirect.
+        self.assertEqual(request.path, extract_redirect(response))
+        # An error message has been published for each exception.
+        self.assertEqual(
+            [(constants.ERROR, "Error: %s" % unicode(failure.value), '')
+                for failure in failures],
+            request._messages.messages)
+
+    def test_handles_NoConnectionsAvailable(self):
+        middleware = RPCErrorsMiddleware()
+        request = factory.make_fake_request(factory.make_string(), 'POST')
+        error_message = (
+            "No connections available for cluster %s" %
+            factory.make_name('cluster'))
+        error = NoConnectionsAvailable(error_message)
+        response = middleware.process_exception(request, error)
+
+        # The response is a redirect.
+        self.assertEqual(request.path, extract_redirect(response))
+        # An error message has been published.
+        self.assertEqual(
+            [(constants.ERROR, "Error: " + error_message, '')],
+            request._messages.messages)
+
+    def test_handles_TimeoutError(self):
+        middleware = RPCErrorsMiddleware()
+        request = factory.make_fake_request(factory.make_string(), 'POST')
+        error_message = "Here, have a picture of Queen Victoria!"
+        error = TimeoutError(error_message)
+        response = middleware.process_exception(request, error)
+
+        # The response is a redirect.
+        self.assertEqual(request.path, extract_redirect(response))
+        # An error message has been published.
+ self.assertEqual( + [(constants.ERROR, "Error: " + error_message, '')], + request._messages.messages) + + def test_ignores_non_rpc_errors(self): + middleware = RPCErrorsMiddleware() + request = factory.make_fake_request(factory.make_string(), 'POST') + exception = ZeroDivisionError( + "You may think it's a long walk down the street to the chemist " + "but that's just peanuts to space!") + response = middleware.process_exception(request, exception) + self.assertIsNone(response) + + def test_adds_message_for_unknown_errors_in_multiple_failures(self): + # If an exception has no message, the middleware will generate a + # useful one and display it to the user. + middleware = RPCErrorsMiddleware() + request = factory.make_fake_request(factory.make_string(), 'POST') + unknown_exception = ZeroDivisionError() + failures = [ + Failure(unknown_exception), + Failure(PowerActionAlreadyInProgress("Unzip a banana!")), + ] + exception = MultipleFailures(*failures) + response = middleware.process_exception(request, exception) + self.assertEqual(request.path, extract_redirect(response)) + + expected_messages = [ + ( + constants.ERROR, + "Error: %s" % get_error_message_for_exception( + unknown_exception), + '', + ), + (constants.ERROR, "Error: %s" % unicode(failures[1].value), ''), + ] + self.assertEqual( + expected_messages, + request._messages.messages) + + def test_ignores_error_on_API(self): + middleware = RPCErrorsMiddleware() + non_api_request = factory.make_fake_request("/api/1.0/ohai") + exception_class = random.choice( + (NoConnectionsAvailable, PowerActionAlreadyInProgress)) + exception = exception_class(factory.make_string()) + self.assertIsNone( + middleware.process_exception(non_api_request, exception)) + + def test_no_connections_available_has_usable_cluster_name_in_msg(self): + # If a NoConnectionsAvailable exception carries a reference to + # the cluster UUID, RPCErrorsMiddleware will look up the + # cluster's name and make the error message it displays more + # 
useful. + middleware = RPCErrorsMiddleware() + request = factory.make_fake_request(factory.make_string(), 'POST') + cluster = factory.make_NodeGroup() + error = NoConnectionsAvailable( + factory.make_name('msg'), uuid=cluster.uuid) + middleware.process_exception(request, error) + + expected_error_message = ( + "Error: Unable to connect to cluster '%s' (%s); no connections " + "available." % (cluster.cluster_name, cluster.uuid)) + self.assertEqual( + [(constants.ERROR, expected_error_message, '')], + request._messages.messages) + + +class APIRPCErrorsMiddlewareTest(MAASServerTestCase): + + def test_handles_error_on_API(self): + middleware = APIRPCErrorsMiddleware() + api_request = factory.make_fake_request("/api/1.0/hello") + error_message = factory.make_string() + exception_class = random.choice( + (NoConnectionsAvailable, PowerActionAlreadyInProgress)) + exception = exception_class(error_message) + response = middleware.process_exception(api_request, exception) + self.assertEqual( + (middleware.handled_exceptions[exception_class], error_message), + (response.status_code, response.content)) + + def test_ignores_error_outside_API(self): + middleware = APIRPCErrorsMiddleware() + non_api_request = factory.make_fake_request("/middleware/api/hello") + exception_class = random.choice( + (NoConnectionsAvailable, PowerActionAlreadyInProgress)) + exception = exception_class(factory.make_string()) + self.assertIsNone( + middleware.process_exception(non_api_request, exception)) + + def test_no_connections_available_returned_as_503(self): + middleware = APIRPCErrorsMiddleware() + request = factory.make_fake_request( + "/api/1.0/" + factory.make_string(), 'POST') + error_message = ( + "Unable to connect to cluster '%s'; no connections available" % + factory.make_name('cluster')) + error = NoConnectionsAvailable(error_message) + response = middleware.process_exception(request, error) + + self.assertEqual( + (httplib.SERVICE_UNAVAILABLE, error_message), + (response.status_code, 
response.content)) + + def test_503_response_includes_retry_after_header_by_default(self): + middleware = APIRPCErrorsMiddleware() + request = factory.make_fake_request( + "/api/1.0/" + factory.make_string(), 'POST') + error = NoConnectionsAvailable(factory.make_name()) + response = middleware.process_exception(request, error) + + self.assertEqual( + ( + httplib.SERVICE_UNAVAILABLE, + '%s' % middleware.RETRY_AFTER_SERVICE_UNAVAILABLE, + ), + (response.status_code, response['Retry-after'])) + + def test_power_action_already_in_progress_returned_as_503(self): + middleware = APIRPCErrorsMiddleware() + request = factory.make_fake_request( + "/api/1.0/" + factory.make_string(), 'POST') + error_message = ( + "Unable to execute power action: another action is already in " + "progress for node %s" % factory.make_name('node')) + error = PowerActionAlreadyInProgress(error_message) + response = middleware.process_exception(request, error) + + self.assertEqual( + (httplib.SERVICE_UNAVAILABLE, error_message), + (response.status_code, response.content)) + + def test_multiple_failures_returned_as_500(self): + middleware = APIRPCErrorsMiddleware() + request = factory.make_fake_request( + "/api/1.0/" + factory.make_string(), 'POST') + failures = [] + error_messages = [] + for _ in range(3): + error_message = factory.make_name("error-") + error_messages.append(error_message) + exception_class = random.choice( + (NoConnectionsAvailable, PowerActionAlreadyInProgress)) + failures.append(Failure(exception_class(error_message))) + exception = MultipleFailures(*failures) + response = middleware.process_exception(request, exception) + + expected_error_message = "\n".join(error_messages) + self.assertEqual( + (httplib.INTERNAL_SERVER_ERROR, expected_error_message), + (response.status_code, response.content)) + + def test_multiple_failures_with_one_exception(self): + middleware = APIRPCErrorsMiddleware() + request = factory.make_fake_request( + "/api/1.0/" + factory.make_string(), 'POST') + 
expected_error_message = factory.make_name("error") + unique_exception = PowerActionAlreadyInProgress(expected_error_message) + exception = MultipleFailures(Failure(unique_exception)) + response = middleware.process_exception(request, exception) + + self.assertEqual( + (httplib.SERVICE_UNAVAILABLE, expected_error_message), + (response.status_code, response.content)) + + def test_handles_TimeoutError(self): + middleware = APIRPCErrorsMiddleware() + request = factory.make_fake_request( + "/api/1.0/" + factory.make_string(), 'POST') + error_message = "No thanks, I'm trying to give them up." + error = TimeoutError(error_message) + response = middleware.process_exception(request, error) + + self.assertEqual( + (httplib.GATEWAY_TIMEOUT, error_message), + (response.status_code, response.content)) + + def test_adds_message_for_unknown_errors_in_multiple_failures(self): + # If an exception has no message, the middleware will generate a + # useful one and display it to the user. + middleware = APIRPCErrorsMiddleware() + request = factory.make_fake_request( + "/api/1.0/" + factory.make_string(), 'POST') + unknown_exception = ZeroDivisionError() + error_message = "It ain't 'alf 'ot mum!" 
+ expected_error_message = "\n".join([ + get_error_message_for_exception(unknown_exception), + error_message]) + failures = [ + Failure(unknown_exception), + Failure(PowerActionAlreadyInProgress(error_message)), + ] + exception = MultipleFailures(*failures) + response = middleware.process_exception(request, exception) + + self.assertEqual( + (httplib.INTERNAL_SERVER_ERROR, expected_error_message), + (response.status_code, response.content)) + + def test_ignores_non_rpc_errors(self): + middleware = APIRPCErrorsMiddleware() + request = factory.make_fake_request( + "/api/1.0/" + factory.make_string(), 'POST') + exception = ZeroDivisionError( + "You may think it's a long walk down the street to the chemist " + "but that's just peanuts to space!") + response = middleware.process_exception(request, exception) + self.assertIsNone(response) + + +class ExternalComponentsMiddlewareTest(MAASServerTestCase): + """Tests for the ExternalComponentsMiddleware.""" + + def test__checks_connectivity_of_accepted_clusters(self): + get_client_for = self.patch(nodegroup_module, 'getClientFor') + middleware = ExternalComponentsMiddleware() + cluster = factory.make_NodeGroup(status=NODEGROUP_STATUS.ACCEPTED) + + request = factory.make_fake_request(factory.make_string(), 'GET') + middleware.process_request(request) + + self.assertThat( + get_client_for, MockCalledOnceWith(cluster.uuid, timeout=0)) + + def test__ignores_non_accepted_clusters(self): + get_client_for = self.patch(nodegroup_module, 'getClientFor') + factory.make_NodeGroup( + status=factory.pick_enum( + NODEGROUP_STATUS, but_not=[NODEGROUP_STATUS.ACCEPTED])) + middleware = ExternalComponentsMiddleware() + request = factory.make_fake_request(factory.make_string(), 'GET') + middleware.process_request(request) + + self.assertThat(get_client_for, MockNotCalled()) + + def test__registers_error_if_all_clusters_are_disconnected(self): + get_client_for = self.patch(nodegroup_module, 'getClientFor') + get_client_for.side_effect = 
NoConnectionsAvailable(
+            "Why, it's a jet-propelled, guided NAAFI!")
+        middleware = ExternalComponentsMiddleware()
+        factory.make_NodeGroup(status=NODEGROUP_STATUS.ACCEPTED)
+
+        request = factory.make_fake_request(factory.make_string(), 'GET')
+        middleware.process_request(request)
+
+        error = get_persistent_error(COMPONENT.CLUSTERS)
+        self.assertEqual(
+            "One or more clusters are currently disconnected. Visit the "
+            "<a href=\"%s\">clusters page</a> for more information." %
+            reverse('cluster-list'),
+            error)
+
+    def test__registers_error_if_any_clusters_are_disconnected(self):
+        get_client_for = self.patch(nodegroup_module, 'getClientFor')
+        get_client_for.side_effect = [
+            NoConnectionsAvailable("Why, it's a jet-propelled, guided NAAFI!"),
+            None,
+        ]
+        middleware = ExternalComponentsMiddleware()
+        factory.make_NodeGroup(status=NODEGROUP_STATUS.ACCEPTED)
+
+        request = factory.make_fake_request(factory.make_string(), 'GET')
+        middleware.process_request(request)
+
+        error = get_persistent_error(COMPONENT.CLUSTERS)
+        self.assertEqual(
+            "One or more clusters are currently disconnected. Visit the "
+            "<a href=\"%s\">clusters page</a> for more information." %
+            reverse('cluster-list'),
+            error)
+
+    def test__removes_error_once_all_clusters_are_connected(self):
+        # Patch getClientFor() to ensure that we don't actually try to
+        # connect to the cluster.
+        self.patch(nodegroup_module, 'getClientFor')
+        middleware = ExternalComponentsMiddleware()
+        factory.make_NodeGroup(status=NODEGROUP_STATUS.ACCEPTED)
+
+        register_persistent_error(
+            COMPONENT.CLUSTERS, "Who flung that batter pudding?")
+        request = factory.make_fake_request(factory.make_string(), 'GET')
+        middleware.process_request(request)
+        self.assertIsNone(get_persistent_error(COMPONENT.CLUSTERS))
+
+    def test__adds_warning_if_boot_images_exists_on_cluster_not_region(self):
+        middleware = ExternalComponentsMiddleware()
+
+        self.patch(
+            middleware,
+            '_get_cluster_images').return_value = [make_rpc_boot_image()]
+        request = factory.make_fake_request(factory.make_string(), 'GET')
+        middleware.process_request(request)
+
+        error = get_persistent_error(COMPONENT.IMPORT_PXE_FILES)
+        self.assertEqual(
+            normalise_whitespace(
+                "Your cluster currently has boot images, but your region "
+                "does not. Nodes will not be able to provision until you "
+                "import boot images into the region. Visit the "
+                "<a href=\"%s\">boot images page</a> to start the "
+                "import." % reverse('images')),
+            error)
+
+    def test__adds_warning_if_boot_image_import_not_started(self):
+        middleware = ExternalComponentsMiddleware()
+
+        self.patch(
+            middleware,
+            '_get_cluster_images').return_value = []
+        request = factory.make_fake_request(factory.make_string(), 'GET')
+        middleware.process_request(request)
+
+        error = get_persistent_error(COMPONENT.IMPORT_PXE_FILES)
+        self.assertEqual(
+            normalise_whitespace(
+                "Boot image import process not started. Nodes will not be "
+                "able to provision without boot images. Visit the "
+                "<a href=\"%s\">boot images page</a> to start the import." % (
+                    reverse('images'))),
+            error)
+
+    def test__removes_warning_if_boot_image_process_started(self):
+        middleware = ExternalComponentsMiddleware()
+        register_persistent_error(
+            COMPONENT.IMPORT_PXE_FILES,
+            "You rotten swine, you! You have deaded me!")
+
+        # Add a BootResource so that the middleware thinks the import
+        # process has started.
+ factory.make_BootResource() + request = factory.make_fake_request(factory.make_string(), 'GET') + middleware.process_request(request) + + error = get_persistent_error(COMPONENT.IMPORT_PXE_FILES) + self.assertIsNone(error) + + def test_get_cluster_images_calls_caches_on_first_call(self): + middleware = ExternalComponentsMiddleware() + + images = [make_rpc_boot_image()] + mock_list = self.patch(middleware_module, 'list_boot_images') + mock_list.return_value = images + self.expectThat(images, Equals(middleware._get_cluster_images())) + self.expectThat(images, Equals(middleware._cluster_images)) + self.expectThat(mock_list, MockCalledOnceWith()) + + def test_get_cluster_images_calls_doesnt_cache_on_second_call(self): + middleware = ExternalComponentsMiddleware() + + images = [make_rpc_boot_image()] + mock_list = self.patch(middleware_module, 'list_boot_images') + mock_list.return_value = images + self.expectThat(images, Equals(middleware._get_cluster_images())) + mock_list.return_value = [] + self.expectThat(images, Equals(middleware._get_cluster_images())) + self.expectThat(images, Equals(middleware._cluster_images)) + + def test_get_cluster_images_calls_cache_after_5mins(self): + middleware = ExternalComponentsMiddleware() + + images = [make_rpc_boot_image()] + mock_list = self.patch(middleware_module, 'list_boot_images') + mock_list.return_value = images + self.expectThat(images, Equals(middleware._get_cluster_images())) + + # Set the last update time 5 minutes ago, so it will + # call list_boot_images again. 
+ middleware._cluster_images_updated -= datetime.timedelta( + minutes=5).total_seconds() + + mock_list.return_value = [] + self.expectThat([], Equals(middleware._get_cluster_images())) + self.expectThat([], Equals(middleware._cluster_images)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_migrations.py maas-1.7.6+bzr3376/src/maasserver/tests/test_migrations.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_migrations.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_migrations.py 2015-07-10 01:27:14.000000000 +0000 @@ -27,6 +27,10 @@ (2, '0002_macaddress_unique'), (39, '0039_add_filestorage_content'), (39, '0039_add_nodegroup_to_bootimage'), + (88, '0088_ip_to_custom_field'), + (88, '0088_z_backport_trunk_0099'), + (100, '0100_remove_cluster_from_bootsrouce'), + (100, '0100_remove_duplicate_bootsource_urls'), ] diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_monitor_connect.py maas-1.7.6+bzr3376/src/maasserver/tests/test_monitor_connect.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_monitor_connect.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_monitor_connect.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,59 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for timer-related signals.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + + +import random + +from maasserver.node_status import ( + get_failed_status, + MONITORED_STATUSES, + ) +from maasserver.rpc.testing.fixtures import MockLiveRegionToClusterRPCFixture +from maasserver.testing.eventloop import ( + RegionEventLoopFixture, + RunningEventLoopFixture, + ) +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase +from maastesting.matchers import MockCalledOnceWith +from mock import ANY +from provisioningserver.rpc.cluster import CancelMonitor + + +class TestCancelMonitor(MAASServerTestCase): + + def setUp(self): + super(TestCancelMonitor, self).setUp() + # Circular imports. + from maasserver import monitor_connect + self.patch(monitor_connect, 'MONITOR_CANCEL_CONNECT', True) + + def prepare_rpc(self): + self.useFixture(RegionEventLoopFixture("rpc")) + self.useFixture(RunningEventLoopFixture()) + return self.useFixture(MockLiveRegionToClusterRPCFixture()) + + def test_changing_status_of_monitored_node_cancels_related_monitor(self): + rpc_fixture = self.prepare_rpc() + status = random.choice(MONITORED_STATUSES) + node = factory.make_Node(status=status) + cluster = rpc_fixture.makeCluster(node.nodegroup, CancelMonitor) + node.status = get_failed_status(status) + node.save() + + self.assertThat( + cluster.CancelMonitor, + MockCalledOnceWith(ANY, id=node.system_id)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_networking_preseed.py maas-1.7.6+bzr3376/src/maasserver/tests/test_networking_preseed.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_networking_preseed.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_networking_preseed.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,847 @@ +# Copyright 2014 Canonical Ltd. 
This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for networking preseed code.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + ] + +import json +from random import randint + +from maasserver import networking_preseed +from maasserver.dns import zonegenerator +from maasserver.enum import ( + NODEGROUP_STATUS, + NODEGROUPINTERFACE_MANAGEMENT, + ) +from maasserver.exceptions import UnresolvableHost +from maasserver.networking_preseed import ( + add_ip_to_mapping, + compose_curtin_network_preseed_for, + extract_mac_string, + extract_network_interfaces, + generate_dns_server_entry, + generate_ethernet_link_entry, + generate_network_entry, + generate_networking_config, + generate_route_entries, + get_mac_for_automatic_interfaces, + list_dns_servers, + map_gateways, + map_netmasks, + map_static_ips, + normalise_ip, + normalise_mac, + ) +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase +from maastesting.matchers import MockCalledOnceWith +from netaddr import ( + IPAddress, + IPNetwork, + ) +from testtools.matchers import HasLength + + +def make_denormalised_mac(): + return ' %s ' % factory.make_mac_address().upper() + + +class TestExtractNetworkInterfaces(MAASServerTestCase): + + def test__returns_nothing_if_no_lshw_output_found(self): + node = factory.make_Node() + self.assertEqual([], extract_network_interfaces(node)) + + def test__returns_nothing_if_no_network_description_found_in_lshw(self): + node = factory.make_Node() + lshw_output = """ + + + + + """ + factory.make_NodeResult_for_commissioning( + node=node, name='00-maas-01-lshw.out', script_result=0, + data=lshw_output.encode('ascii')) + self.assertEqual([], extract_network_interfaces(node)) + + def test__extracts_interface_data(self): + node = factory.make_Node() + interface = 
factory.make_name('eth') + mac = factory.make_mac_address() + lshw_output = """ + + %(interface)s + %(mac)s + + """ % {'interface': interface, 'mac': mac} + factory.make_NodeResult_for_commissioning( + node=node, name='00-maas-01-lshw.out', script_result=0, + data=lshw_output.encode('ascii')) + self.assertEqual([(interface, mac)], extract_network_interfaces(node)) + + def test__extracts_interface_data_with_multiple_interface_format(self): + node = factory.make_Node() + interface = factory.make_name('eth') + mac = factory.make_mac_address() + lshw_output = """ + + %(interface)s + %(mac)s + + """ % {'interface': interface, 'mac': mac} + factory.make_NodeResult_for_commissioning( + node=node, name='00-maas-01-lshw.out', script_result=0, + data=lshw_output.encode('ascii')) + self.assertEqual([(interface, mac)], extract_network_interfaces(node)) + + def test__finds_network_interface_on_motherboard(self): + node = factory.make_Node() + interface = factory.make_name('eth') + mac = factory.make_mac_address() + # Stripped-down version of real lshw output: + lshw_output = """ + + + + + Motherboard + + Host bridge + + Ethernet interface + 82566DM-2 Gigabit Network Connection + Intel Corporation + %(interface)s + %(mac)s + + + + + + + + + """ % {'interface': interface, 'mac': mac} + factory.make_NodeResult_for_commissioning( + node=node, name='00-maas-01-lshw.out', script_result=0, + data=lshw_output.encode('ascii')) + self.assertEqual([(interface, mac)], extract_network_interfaces(node)) + + def test__finds_network_interface_on_pci_bus(self): + node = factory.make_Node() + interface = factory.make_name('eth') + mac = factory.make_mac_address() + # Stripped-down version of real lshw output: + lshw_output = """ + + + + + Motherboard + + Host bridge + + PCI bridge + + Ethernet interface + %(interface)s + %(mac)s + + + + + + + + + + """ % {'interface': interface, 'mac': mac} + factory.make_NodeResult_for_commissioning( + node=node, name='00-maas-01-lshw.out', script_result=0, + 
data=lshw_output.encode('ascii')) + self.assertEqual([(interface, mac)], extract_network_interfaces(node)) + + def test__ignores_nodes_without_interface_name(self): + node = factory.make_Node() + mac = factory.make_mac_address() + lshw_output = """ + + %s + + """ % mac + factory.make_NodeResult_for_commissioning( + node=node, name='00-maas-01-lshw.out', script_result=0, + data=lshw_output.encode('ascii')) + self.assertEqual([], extract_network_interfaces(node)) + + def test__ignores_nodes_without_mac(self): + node = factory.make_Node() + interface = factory.make_name('eth') + lshw_output = """ + + %s + + """ % interface + factory.make_NodeResult_for_commissioning( + node=node, name='00-maas-01-lshw.out', script_result=0, + data=lshw_output.encode('ascii')) + self.assertEqual([], extract_network_interfaces(node)) + + def test__normalises_mac(self): + node = factory.make_Node() + interface = factory.make_name('eth') + mac = make_denormalised_mac() + self.assertNotEqual(normalise_mac(mac), mac) + lshw_output = """ + + %(interface)s + %(mac)s + + """ % {'interface': interface, 'mac': mac} + factory.make_NodeResult_for_commissioning( + node=node, name='00-maas-01-lshw.out', script_result=0, + data=lshw_output.encode('ascii')) + [entry] = extract_network_interfaces(node) + _, extracted_mac = entry + self.assertEqual(normalise_mac(mac), extracted_mac) + + +class TestNormaliseMAC(MAASServerTestCase): + + def test__normalises_case(self): + mac = factory.make_mac_address() + self.assertEqual( + normalise_mac(mac.lower()), + normalise_mac(mac.upper())) + + def test__strips_whitespace(self): + mac = factory.make_mac_address() + self.assertEqual( + normalise_mac(mac), + normalise_mac(' %s ' % mac)) + + def test__is_idempotent(self): + mac = factory.make_mac_address() + self.assertEqual( + normalise_mac(mac), + normalise_mac(normalise_mac(mac))) + + +class TestNormaliseIP(MAASServerTestCase): + + def test__normalises_case(self): + ip = factory.make_ipv6_address() + 
self.assertEqual( + normalise_ip(ip.upper()), + normalise_ip(ip.lower())) + + def test__strips_whitespace(self): + ip = factory.make_ipv4_address() + self.assertEqual(ip, normalise_ip(' %s ' % ip)) + + def test__normalises_zeroes(self): + self.assertEqual('::1', normalise_ip('0000:000:00:0::1')) + + def test__accepts_bytes(self): + ip = factory.make_ipv6_address() + self.assertEqual(normalise_ip(ip), normalise_ip(ip.encode('ascii'))) + + def test__is_idempotent(self): + ip = factory.make_ipv6_address() + self.assertEqual(normalise_ip(ip), normalise_ip(normalise_ip(ip))) + + +class TestGenerateEthernetLinkEntry(MAASServerTestCase): + + def test__generates_dict(self): + interface = factory.make_name('eth') + mac = factory.make_mac_address() + self.assertEqual( + { + 'id': interface, + 'type': 'phy', + 'ethernet_mac_address': mac, + }, + generate_ethernet_link_entry(interface, mac)) + + +class TestGenerateDNServerEntry(MAASServerTestCase): + + def test__returns_dict(self): + address = factory.make_ipv4_address() + self.assertEqual( + { + 'type': 'dns', + 'address': address, + }, + generate_dns_server_entry(address)) + + +def patch_dns_servers(testcase, ipv4_dns=None, ipv6_dns=None): + """Patch `get_dns_server_address` to return the given addresses. + + The fake will return `ipv4_dns` or `ipv6_dns` as appropriate to the + arguments. For that reason, this patch does not use a `Mock`. 
+ """ + + def fake_get_maas_facing_server_address(cluster, ipv4=True, ipv6=True): + result = None + if ipv4: + result = ipv4_dns + if result is None and ipv6: + result = ipv6_dns + if result is None: + raise UnresolvableHost() + return result + + testcase.patch( + zonegenerator, 'get_maas_facing_server_address', + fake_get_maas_facing_server_address) + testcase.patch(zonegenerator, 'warn_loopback') + + +class ListDNSServers(MAASServerTestCase): + + def test__includes_ipv4_and_ipv6_by_default(self): + ipv4_dns = factory.make_ipv4_address() + ipv6_dns = factory.make_ipv6_address() + patch_dns_servers(self, ipv4_dns=ipv4_dns, ipv6_dns=ipv6_dns) + node = factory.make_Node(disable_ipv4=False) + self.assertItemsEqual([ipv4_dns, ipv6_dns], list_dns_servers(node)) + + def test__omits_ipv4_if_disabled_for_node(self): + ipv4_dns = factory.make_ipv4_address() + ipv6_dns = factory.make_ipv6_address() + patch_dns_servers(self, ipv4_dns=ipv4_dns, ipv6_dns=ipv6_dns) + node = factory.make_Node(disable_ipv4=True) + self.assertItemsEqual([ipv6_dns], list_dns_servers(node)) + + def test__omits_ipv4_if_unvailable(self): + ipv6_dns = factory.make_ipv6_address() + patch_dns_servers(self, ipv6_dns=ipv6_dns) + node = factory.make_Node(disable_ipv4=False) + self.assertItemsEqual([ipv6_dns], list_dns_servers(node)) + + def test__omits_ipv6_if_unavailable(self): + ipv4_dns = factory.make_ipv4_address() + patch_dns_servers(self, ipv4_dns=ipv4_dns) + node = factory.make_Node(disable_ipv4=False) + self.assertItemsEqual([ipv4_dns], list_dns_servers(node)) + + +def make_cluster_interface(network=None, **kwargs): + return factory.make_NodeGroupInterface( + factory.make_NodeGroup(), network=network, **kwargs) + + +class TestGenerateRouteEntries(MAASServerTestCase): + + def test__generates_IPv4_default_route_if_available(self): + network = factory.make_ipv4_network() + router = factory.pick_ip_in_network(network) + cluster_interface = make_cluster_interface(network, router_ip=router) + 
self.assertEqual( + [ + { + 'network': '0.0.0.0', + 'netmask': '0.0.0.0', + 'gateway': unicode(router), + }, + ], + generate_route_entries(cluster_interface)) + + def test__generates_IPv6_default_route_if_available(self): + network = factory.make_ipv6_network() + router = factory.pick_ip_in_network(network) + cluster_interface = make_cluster_interface(network, router_ip=router) + self.assertEqual( + [ + { + 'network': '::', + 'netmask': '::', + 'gateway': unicode(router), + }, + ], + generate_route_entries(cluster_interface)) + + def test__generates_empty_list_if_no_route_available(self): + network = factory.make_ipv4_network() + cluster_interface = make_cluster_interface( + network, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED, + router_ip='') + self.assertEqual([], generate_route_entries(cluster_interface)) + + +class TestGenerateNetworkEntry(MAASServerTestCase): + + def test__generates_IPv4_dict(self): + network = factory.make_ipv4_network() + network_interface = factory.make_name('eth') + cluster_interface = make_cluster_interface(network) + ip = factory.pick_ip_in_network(network) + + entry = generate_network_entry( + network_interface, cluster_interface, ip=ip) + + del entry['routes'] + self.assertEqual( + { + 'type': 'ipv4', + 'link': network_interface, + 'ip_address': unicode(ip), + 'netmask': unicode(network.netmask), + }, + entry) + + def test__generates_IPv6_dict(self): + slash = randint(48, 64) + network = factory.make_ipv6_network(slash=slash) + network_interface = factory.make_name('eth') + cluster_interface = make_cluster_interface(network) + ip = factory.pick_ip_in_network(network) + + entry = generate_network_entry( + network_interface, cluster_interface, ip=ip) + + del entry['routes'] + self.assertEqual( + { + 'type': 'ipv6', + 'link': network_interface, + 'ip_address': '%s/%d' % (ip, slash), + }, + entry) + + def test__omits_IP_if_not_given(self): + network = factory.make_ipv4_network() + network_interface = factory.make_name('eth') + 
cluster_interface = make_cluster_interface(network) + + entry = generate_network_entry(network_interface, cluster_interface) + + del entry['routes'] + self.assertEqual( + { + 'type': 'ipv4', + 'link': network_interface, + 'netmask': unicode(network.netmask), + }, + entry) + + def test__tells_IPv4_from_IPv6_even_without_IP(self): + cluster_interface = make_cluster_interface(factory.make_ipv6_network()) + entry = generate_network_entry( + factory.make_name('eth'), cluster_interface) + self.assertEqual('ipv6', entry['type']) + + def test__includes_IPv4_routes_on_IPv4_network(self): + network = factory.make_ipv4_network() + router = factory.pick_ip_in_network(network) + cluster_interface = make_cluster_interface( + network, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, + router_ip=router) + + entry = generate_network_entry( + factory.make_name('eth'), cluster_interface) + + self.assertThat(entry['routes'], HasLength(1)) + [route] = entry['routes'] + self.assertEqual(unicode(router), route['gateway']) + + def test__includes_IPv6_routes_on_IPv6_network(self): + network = factory.make_ipv6_network() + router = factory.pick_ip_in_network(network) + cluster_interface = make_cluster_interface( + network, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, + router_ip=router) + + entry = generate_network_entry( + factory.make_name('eth'), cluster_interface) + + self.assertThat(entry['routes'], HasLength(1)) + [route] = entry['routes'] + self.assertEqual(unicode(router), route['gateway']) + + +class TestGenerateNetworkingConfig(MAASServerTestCase): + + def patch_interfaces(self, interface_mac_pairs): + patch = self.patch_autospec( + networking_preseed, 'extract_network_interfaces') + patch.return_value = interface_mac_pairs + return patch + + def test__returns_config_dict(self): + self.patch_interfaces([]) + patch_dns_servers(self) + config = generate_networking_config(factory.make_Node()) + self.assertIsInstance(config, dict) + self.assertEqual("MAAS", config['provider']) + + def 
test__includes_links(self): + patch_dns_servers(self) + node = factory.make_Node() + interface = factory.make_name('eth') + mac = factory.make_mac_address() + patch = self.patch_interfaces([(interface, mac)]) + + config = generate_networking_config(node) + + self.assertThat(patch, MockCalledOnceWith(node)) + self.assertEqual( + [ + { + 'id': interface, + 'type': 'phy', + 'ethernet_mac_address': mac, + }, + ], + config['network_info']['links']) + + def test__includes_networks(self): + # This section is not yet implemented, so expect an empty list. + patch_dns_servers(self) + self.patch_interfaces([]) + config = generate_networking_config(factory.make_Node()) + self.assertEqual([], config['network_info']['networks']) + + def test__includes_dns_servers(self): + dns_address = factory.make_ipv4_address() + patch_dns_servers(self, dns_address) + self.patch_interfaces([]) + config = generate_networking_config( + factory.make_Node(disable_ipv4=False)) + self.assertEqual( + [ + { + 'type': 'dns', + 'address': dns_address, + }, + ], + config['network_info']['services']) + + +class TestExtractMACString(MAASServerTestCase): + + def test__returns_string(self): + self.assertIsInstance( + extract_mac_string(factory.make_MACAddress_with_Node()), + unicode) + + def test__returns_MAC_address(self): + mac = factory.make_mac_address() + self.assertEqual( + normalise_mac(mac), + extract_mac_string(factory.make_MACAddress_with_Node(address=mac))) + + def test__works_even_if_mac_address_is_already_string(self): + # The ORM normally presents MACAddress.mac_address as a MAC object. + # But a string will work too. 
+ mac_string = factory.make_mac_address() + mac = factory.make_MACAddress_with_Node() + mac.mac_address = mac_string + self.assertIsInstance(mac.mac_address, unicode) + self.assertEqual(normalise_mac(mac_string), extract_mac_string(mac)) + + +class TestAddIPToMapping(MAASServerTestCase): + + def test__adds_to_empty_entry(self): + mac = factory.make_MACAddress_with_Node() + ip = factory.make_ipv4_address() + mapping = {} + add_ip_to_mapping(mapping, mac, ip) + self.assertEqual({mac.mac_address: [ip]}, mapping) + + def test__adds_to_nonempty_entry(self): + mapping = {} + mac = factory.make_MACAddress_with_Node() + ip1 = factory.make_ipv4_address() + add_ip_to_mapping(mapping, mac, ip1) + ip2 = factory.make_ipv4_address() + add_ip_to_mapping(mapping, mac, ip2) + self.assertItemsEqual([ip1, ip2], mapping[mac.mac_address]) + + def test__will_not_add_duplicate(self): + mac = factory.make_MACAddress_with_Node() + ip = factory.make_ipv4_address() + mapping = {mac.mac_address: [ip]} + original_mapping = mapping.copy() + add_ip_to_mapping(mapping, mac, ip) + self.assertEqual(original_mapping, mapping) + + def test__does_not_add_None(self): + mac = factory.make_MACAddress_with_Node() + mapping = {} + add_ip_to_mapping(mapping, mac, None) + self.assertEqual({}, mapping) + + def test__does_not_add_empty_string(self): + mac = factory.make_MACAddress_with_Node() + mapping = {} + add_ip_to_mapping(mapping, mac, '') + self.assertEqual({}, mapping) + + +class TestMapStaticIPs(MAASServerTestCase): + + def test__returns_empty_if_none_found(self): + self.assertEqual({}, map_static_ips(factory.make_Node())) + + def test__finds_IPv4_address(self): + node = factory.make_Node() + mac = factory.make_MACAddress(node=node) + ip = factory.make_ipv4_address() + factory.make_StaticIPAddress(ip=ip, mac=mac) + self.assertEqual( + {mac.mac_address: [ip]}, + map_static_ips(node)) + + def test__finds_IPv6_address(self): + node = factory.make_Node() + mac = factory.make_MACAddress(node=node) + ip = 
factory.make_ipv6_address() + factory.make_StaticIPAddress(ip=ip, mac=mac) + self.assertEqual( + {mac.mac_address: [ip]}, + map_static_ips(node)) + + def test__finds_addresses_on_multiple_MACs(self): + node = factory.make_Node() + mac1 = factory.make_MACAddress(node=node) + mac2 = factory.make_MACAddress(node=node) + ip1 = factory.make_ipv4_address() + factory.make_StaticIPAddress(ip=ip1, mac=mac1) + ip2 = factory.make_ipv4_address() + factory.make_StaticIPAddress(ip=ip2, mac=mac2) + self.assertEqual( + { + mac1.mac_address: [ip1], + mac2.mac_address: [ip2], + }, + map_static_ips(node)) + + def test__finds_multiple_addresses_on_MAC(self): + node = factory.make_Node() + mac = factory.make_MACAddress(node=node) + ipv4 = factory.make_ipv4_address() + ipv6 = factory.make_ipv6_address() + factory.make_StaticIPAddress(ip=ipv4, mac=mac) + factory.make_StaticIPAddress(ip=ipv6, mac=mac) + mapping = map_static_ips(node) + self.assertItemsEqual([mac.mac_address], mapping.keys()) + self.assertItemsEqual([ipv4, ipv6], mapping[mac.mac_address]) + + +class TestMapGateways(MAASServerTestCase): + + def test__returns_empty_if_none_found(self): + self.assertEqual({}, map_gateways(factory.make_Node())) + + def test__finds_IPv4_gateway(self): + network = factory.make_ipv4_network(slash=24) + gateway = factory.pick_ip_in_network(network) + cluster = factory.make_NodeGroup(status=NODEGROUP_STATUS.ACCEPTED) + cluster_interface = factory.make_NodeGroupInterface( + cluster, network=network, router_ip=gateway, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + node = factory.make_Node(nodegroup=cluster) + mac = factory.make_MACAddress_with_Node( + node=node, cluster_interface=cluster_interface) + + self.assertEqual( + {mac.mac_address: [gateway]}, + map_gateways(node)) + + def test__finds_IPv6_gateway(self): + network = factory.make_ipv6_network() + gateway = factory.pick_ip_in_network(network) + cluster = factory.make_NodeGroup(status=NODEGROUP_STATUS.ACCEPTED) + net_interface = 
factory.make_name('eth') + ipv4_interface = factory.make_NodeGroupInterface( + cluster, interface=net_interface, + management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) + factory.make_NodeGroupInterface( + cluster, network=network, router_ip=gateway, + interface=net_interface, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + node = factory.make_Node(nodegroup=cluster) + mac = factory.make_MACAddress_with_Node( + node=node, cluster_interface=ipv4_interface) + + self.assertEqual( + {mac.mac_address: [gateway]}, + map_gateways(node)) + + def test__finds_gateways_on_multiple_MACs(self): + cluster = factory.make_NodeGroup(status=NODEGROUP_STATUS.ACCEPTED) + node = factory.make_Node(nodegroup=cluster) + network1 = factory.make_ipv4_network(slash=24) + gateway1 = factory.pick_ip_in_network(network1) + cluster_interface1 = factory.make_NodeGroupInterface( + cluster, network=network1, router_ip=gateway1, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + mac1 = factory.make_MACAddress_with_Node( + node=node, cluster_interface=cluster_interface1) + network2 = factory.make_ipv4_network(slash=24) + gateway2 = factory.pick_ip_in_network(network2) + cluster_interface2 = factory.make_NodeGroupInterface( + cluster, network=network2, router_ip=gateway2, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + mac2 = factory.make_MACAddress_with_Node( + node=node, cluster_interface=cluster_interface2) + + self.assertEqual( + { + mac1.mac_address: [gateway1], + mac2.mac_address: [gateway2], + }, + map_gateways(node)) + + def test__finds_multiple_gateways_on_MAC(self): + cluster = factory.make_NodeGroup(status=NODEGROUP_STATUS.ACCEPTED) + net_interface = factory.make_name('eth') + ipv4_network = factory.make_ipv4_network(slash=24) + ipv4_gateway = factory.pick_ip_in_network(ipv4_network) + ipv4_interface = factory.make_NodeGroupInterface( + cluster, network=ipv4_network, router_ip=ipv4_gateway, + interface=net_interface, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + ipv6_network = 
factory.make_ipv6_network() + ipv6_gateway = factory.pick_ip_in_network(ipv6_network) + factory.make_NodeGroupInterface( + cluster, network=ipv6_network, router_ip=ipv6_gateway, + interface=net_interface, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + node = factory.make_Node(nodegroup=cluster) + mac = factory.make_MACAddress_with_Node( + node=node, cluster_interface=ipv4_interface) + + mapping = map_gateways(node) + + self.assertItemsEqual([mac.mac_address], mapping.keys()) + self.assertItemsEqual( + [ipv4_gateway, ipv6_gateway], + mapping[mac.mac_address]) + + +class TestGetMACForAutomaticInterfaces(MAASServerTestCase): + + def test__uses_pxe_mac(self): + node = factory.make_node_with_mac_attached_to_nodegroupinterface() + mac = node.get_primary_mac() + node.pxe_mac = mac + result = get_mac_for_automatic_interfaces(node) + self.assertEqual(result, extract_mac_string(mac)) + + +class TestMapNetmasks(MAASServerTestCase): + + def test__maps_ipv4_netmask(self): + network = factory.make_ipv4_network() + netmask = unicode(IPNetwork(network).netmask) + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + network=network) + mac = node.get_primary_mac() + ip = factory.pick_ip_in_network(network) + factory.make_StaticIPAddress(mac=mac, ip=ip) + self.assertEqual({normalise_ip(ip): netmask}, map_netmasks(node)) + + def test__maps_ipv6_netmask_as_prefix_bits(self): + network = factory.make_ipv6_network(slash=randint(16, 127)) + netmask = '%d' % IPNetwork(network).prefixlen + node = factory.make_node_with_mac_attached_to_nodegroupinterface() + [ipv4_interface] = node.nodegroup.nodegroupinterface_set.all() + factory.make_NodeGroupInterface( + node.nodegroup, network=network, + interface=ipv4_interface.interface) + mac = node.get_primary_mac() + ip = factory.pick_ip_in_network(network) + factory.make_StaticIPAddress(mac=mac, ip=ip) + self.assertEqual({normalise_ip(ip): netmask}, map_netmasks(node)) + + def 
test__ignores_network_interface_without_cluster_interface(self): + network = factory.make_ipv4_network() + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + network=network) + mac = node.get_primary_mac() + mac.cluster_interface = None + mac.save() + ip = factory.pick_ip_in_network(network) + factory.make_StaticIPAddress(mac=mac, ip=ip) + self.assertEqual({}, map_netmasks(node)) + + def test__ignores_network_interface_without_static_IP(self): + network = factory.make_ipv4_network() + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + network=network) + self.assertEqual({}, map_netmasks(node)) + + +class TestComposeCurtinNetworkPreseedFor(MAASServerTestCase): + + def test__composes_config(self): + fake = self.patch_autospec( + networking_preseed, 'compose_curtin_network_preseed') + fake.return_value = [] + node = factory.make_Node(disable_ipv4=False) + node.nodegroup.accept() + network = factory.make_ipv4_network(slash=16) + router = factory.pick_ip_in_network(network) + dns = factory.make_ipv4_address() + patch_dns_servers(self, dns) + static_low = unicode(IPAddress(network.first + 1)) + static_high = unicode(IPAddress(network.first + 2)) + dyn_low = unicode(IPAddress(network.first + 3)) + dyn_high = unicode(IPAddress(network.first + 4)) + interface = factory.make_NodeGroupInterface( + node.nodegroup, network=network, router_ip=router, + static_ip_range_low=static_low, static_ip_range_high=static_high, + ip_range_low=dyn_low, ip_range_high=dyn_high, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + mac = factory.make_MACAddress(node=node, cluster_interface=interface) + extract_interfaces = self.patch_autospec( + networking_preseed, 'extract_network_interfaces') + extract_interfaces.return_value = [mac.mac_address] + + compose_curtin_network_preseed_for(node) + + expected_config = { + 'interfaces': [mac.mac_address], + 'auto_interfaces': [mac.mac_address], + 'ips_mapping': {}, + 'gateways_mapping': {mac.mac_address: [router]}, + 
'nameservers': [dns], + 'netmasks': {}, + } + self.assertThat(fake, MockCalledOnceWith(node, expected_config)) + + def test__returns_preseeds_as_list_of_text(self): + fake = self.patch_autospec( + networking_preseed, 'compose_curtin_network_preseed') + preseed_data = {factory.make_name('key'): factory.make_name('value')} + fake.return_value = [preseed_data] + node = factory.make_Node(mac=True) + + preseed = compose_curtin_network_preseed_for(node) + + self.assertIsInstance(preseed, list) + [data] = preseed + self.assertEqual(preseed_data, json.loads(data)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_node_action.py maas-1.7.6+bzr3376/src/maasserver/tests/test_node_action.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_node_action.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_node_action.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for node actions.""" @@ -14,29 +14,52 @@ __metaclass__ = type __all__ = [] +import random from urlparse import urlparse from django.core.urlresolvers import reverse +from maasserver import locks +from maasserver.clusterrpc.utils import get_error_message_for_exception from maasserver.enum import ( + NODE_BOOT, NODE_PERMISSION, NODE_STATUS, + NODE_STATUS_CHOICES, NODE_STATUS_CHOICES_DICT, + POWER_STATE, ) -from maasserver.exceptions import Redirect -from maasserver.models import Tag +from maasserver.exceptions import ( + NodeActionError, + Redirect, + ) +from maasserver.models import StaticIPAddress from maasserver.node_action import ( + AbortCommissioning, + AbortOperation, + AcquireNode, Commission, compile_node_actions, Delete, + MarkBroken, + MarkFixed, NodeAction, + ReleaseNode, + RPC_EXCEPTIONS, StartNode, StopNode, UseCurtin, UseDI, ) +from maasserver.node_status import FAILED_STATUSES from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase -from provisioningserver.power.poweraction import PowerAction +from maastesting.matchers import MockCalledOnceWith +from mock import ANY +from provisioningserver.rpc.exceptions import MultipleFailures +from provisioningserver.utils.shell import ExternalProcessError +from testtools.matchers import Equals +from twisted.python.failure import Failure ALL_STATUSES = NODE_STATUS_CHOICES_DICT.keys() @@ -64,10 +87,10 @@ def test_compile_node_actions_returns_available_actions(self): class MyAction(FakeNodeAction): - name = factory.getRandomString() + name = factory.make_string() actions = compile_node_actions( - factory.make_node(), factory.make_admin(), classes=[MyAction]) + factory.make_Node(), factory.make_admin(), classes=[MyAction]) self.assertEqual([MyAction.name], actions.keys()) def test_compile_node_actions_checks_node_status(self): @@ -75,7 +98,7 @@ class MyAction(FakeNodeAction): actionable_statuses = 
(NODE_STATUS.READY, ) - node = factory.make_node(status=NODE_STATUS.DECLARED) + node = factory.make_Node(status=NODE_STATUS.NEW) actions = compile_node_actions( node, factory.make_admin(), classes=[MyAction]) self.assertEqual({}, actions) @@ -85,41 +108,41 @@ class MyAction(FakeNodeAction): permission = NODE_PERMISSION.EDIT - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) actions = compile_node_actions( - node, factory.make_user(), classes=[MyAction]) + node, factory.make_User(), classes=[MyAction]) self.assertEqual({}, actions) def test_compile_node_actions_includes_inhibited_actions(self): class MyAction(FakeNodeAction): - fake_inhibition = factory.getRandomString() + fake_inhibition = factory.make_string() actions = compile_node_actions( - factory.make_node(), factory.make_admin(), classes=[MyAction]) + factory.make_Node(), factory.make_admin(), classes=[MyAction]) self.assertEqual([MyAction.name], actions.keys()) def test_compile_node_actions_maps_names(self): class Action1(FakeNodeAction): - name = factory.getRandomString() + name = factory.make_string() class Action2(FakeNodeAction): - name = factory.getRandomString() + name = factory.make_string() actions = compile_node_actions( - factory.make_node(), factory.make_admin(), + factory.make_Node(), factory.make_admin(), classes=[Action1, Action2]) for name, action in actions.items(): self.assertEqual(name, action.name) def test_compile_node_actions_maintains_order(self): - names = [factory.getRandomString() for counter in range(4)] + names = [factory.make_string() for counter in range(4)] classes = [ type(b"Action%d" % counter, (FakeNodeAction,), {'name': name}) for counter, name in enumerate(names)] actions = compile_node_actions( - factory.make_node(), factory.make_admin(), classes=classes) + factory.make_Node(), factory.make_admin(), classes=classes) self.assertSequenceEqual(names, actions.keys()) self.assertSequenceEqual( names, 
[action.name for action in actions.values()]) @@ -129,8 +152,8 @@ class MyAction(FakeNodeAction): permission = NODE_PERMISSION.EDIT - node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) + node = factory.make_Node( + status=NODE_STATUS.ALLOCATED, owner=factory.make_User()) self.assertTrue(MyAction(node, node.owner).is_permitted()) def test_is_permitted_disallows_if_user_lacks_permission(self): @@ -138,13 +161,13 @@ class MyAction(FakeNodeAction): permission = NODE_PERMISSION.EDIT - node = factory.make_node( - status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) - self.assertFalse(MyAction(node, factory.make_user()).is_permitted()) + node = factory.make_Node( + status=NODE_STATUS.ALLOCATED, owner=factory.make_User()) + self.assertFalse(MyAction(node, factory.make_User()).is_permitted()) def test_inhibition_wraps_inhibit(self): - inhibition = factory.getRandomString() - action = FakeNodeAction(factory.make_node(), factory.make_user()) + inhibition = factory.make_string() + action = FakeNodeAction(factory.make_Node(), factory.make_User()) action.fake_inhibition = inhibition self.assertEqual(inhibition, action.inhibition) @@ -152,41 +175,28 @@ # The inhibition property will call inhibit() only once. We can # prove this by changing the string inhibit() returns; it won't # affect the value of the property. - inhibition = factory.getRandomString() - action = FakeNodeAction(factory.make_node(), factory.make_user()) + inhibition = factory.make_string() + action = FakeNodeAction(factory.make_Node(), factory.make_User()) action.fake_inhibition = inhibition self.assertEqual(inhibition, action.inhibition) - action.fake_inhibition = factory.getRandomString() + action.fake_inhibition = factory.make_string() self.assertEqual(inhibition, action.inhibition) def test_inhibition_caches_None(self): # An inhibition of None is also faithfully cached. In other # words, it doesn't get mistaken for an uninitialized cache or # anything. 
- action = FakeNodeAction(factory.make_node(), factory.make_user()) + action = FakeNodeAction(factory.make_Node(), factory.make_User()) action.fake_inhibition = None self.assertIsNone(action.inhibition) - action.fake_inhibition = factory.getRandomString() + action.fake_inhibition = factory.make_string() self.assertIsNone(action.inhibition) class TestDeleteNodeAction(MAASServerTestCase): - def test_Delete_inhibit_when_node_is_allocated(self): - node = factory.make_node(status=NODE_STATUS.ALLOCATED) - action = Delete(node, factory.make_admin()) - inhibition = action.inhibit() - self.assertEqual( - "You cannot delete this node because it's in use.", inhibition) - - def test_Delete_does_not_inhibit_otherwise(self): - node = factory.make_node(status=NODE_STATUS.FAILED_TESTS) - action = Delete(node, factory.make_admin()) - inhibition = action.inhibit() - self.assertIsNone(inhibition) - def test_Delete_redirects_to_node_delete_view(self): - node = factory.make_node() + node = factory.make_Node() action = Delete(node, factory.make_admin()) try: action.execute() @@ -199,144 +209,437 @@ class TestCommissionNodeAction(MAASServerTestCase): + scenarios = ( + ("NEW", {"status": NODE_STATUS.NEW}), + ("FAILED_COMMISSIONING", { + "status": NODE_STATUS.FAILED_COMMISSIONING}), + ("READY", {"status": NODE_STATUS.READY}), + ) + def test_Commission_starts_commissioning(self): - statuses = ( - NODE_STATUS.DECLARED, NODE_STATUS.FAILED_TESTS, - NODE_STATUS.READY) - for status in statuses: - node = factory.make_node( - mac=True, status=status, - power_type='ether_wake') - action = Commission(node, factory.make_admin()) - action.execute() - self.assertEqual(NODE_STATUS.COMMISSIONING, node.status) - self.assertEqual( - 'provisioningserver.tasks.power_on', - self.celery.tasks[0]['task'].name) + node = factory.make_Node( + mac=True, status=self.status, + power_type='ether_wake') + self.patch_autospec(node, 'start_transition_monitor') + node_start = self.patch(node, 'start') + admin = 
factory.make_admin() + action = Commission(node, admin) + action.execute() + self.assertEqual(NODE_STATUS.COMMISSIONING, node.status) + self.assertThat( + node_start, MockCalledOnceWith(admin, user_data=ANY)) + + +class TestAbortCommissioningNodeAction(MAASServerTestCase): + + def test_AbortCommissioning_aborts_commissioning(self): + node = factory.make_Node( + mac=True, status=NODE_STATUS.COMMISSIONING, + power_type='virsh') + self.patch_autospec(node, 'stop_transition_monitor') + node_stop = self.patch_autospec(node, 'stop') + admin = factory.make_admin() + + AbortCommissioning(node, admin).execute() + self.assertEqual(NODE_STATUS.NEW, node.status) + self.assertThat(node_stop, MockCalledOnceWith(admin)) + + +class TestAbortOperationNodeAction(MAASServerTestCase): + + def test_AbortOperation_aborts_disk_erasing(self): + owner = factory.make_User() + node = factory.make_Node( + status=NODE_STATUS.DISK_ERASING, owner=owner) + node_stop = self.patch_autospec(node, 'stop') + + AbortOperation(node, owner).execute() + + self.assertEqual(NODE_STATUS.FAILED_DISK_ERASING, node.status) + self.assertThat(node_stop, MockCalledOnceWith(owner)) + + +class TestAcquireNodeNodeAction(MAASServerTestCase): + + def test_AcquireNode_acquires_node(self): + node = factory.make_Node( + mac=True, status=NODE_STATUS.READY, + power_type='ether_wake') + user = factory.make_User() + AcquireNode(node, user).execute() + self.assertEqual(NODE_STATUS.ALLOCATED, node.status) + self.assertEqual(user, node.owner) + + def test_AcquireNode_uses_node_acquire_lock(self): + node = factory.make_Node( + mac=True, status=NODE_STATUS.READY, + power_type='ether_wake') + user = factory.make_User() + node_acquire = self.patch(locks, 'node_acquire') + AcquireNode(node, user).execute() + self.assertThat(node_acquire.__enter__, MockCalledOnceWith()) + self.assertThat( + node_acquire.__exit__, MockCalledOnceWith(None, None, None)) class TestStartNodeNodeAction(MAASServerTestCase): def 
test_StartNode_inhibit_allows_user_with_SSH_key(self): - user_with_key = factory.make_user() - factory.make_sshkey(user_with_key) + user_with_key = factory.make_User() + factory.make_SSHKey(user_with_key) self.assertIsNone( - StartNode(factory.make_node(), user_with_key).inhibit()) + StartNode(factory.make_Node(), user_with_key).inhibit()) def test_StartNode_inhibit_disallows_user_without_SSH_key(self): - user_without_key = factory.make_user() - action = StartNode(factory.make_node(), user_without_key) + user_without_key = factory.make_User() + action = StartNode(factory.make_Node(), user_without_key) inhibition = action.inhibit() self.assertIsNotNone(inhibition) self.assertIn("SSH key", inhibition) - def test_StartNode_acquires_and_starts_node(self): - node = factory.make_node( - mac=True, status=NODE_STATUS.READY, - power_type='ether_wake') - user = factory.make_user() + def test_StartNode_starts_node(self): + user = factory.make_User() + node = factory.make_Node( + mac=True, status=NODE_STATUS.ALLOCATED, + power_type='ether_wake', owner=user) + node_start = self.patch(node, 'start') StartNode(node, user).execute() + self.assertThat( + node_start, MockCalledOnceWith(user)) + + def test_StartNode_returns_error_when_no_more_static_IPs(self): + user = factory.make_User() + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + status=NODE_STATUS.ALLOCATED, power_type='ether_wake', owner=user, + power_state=POWER_STATE.OFF) + ngi = node.get_primary_mac().cluster_interface + + # Narrow the available IP range and pre-claim the only address. + ngi.static_ip_range_high = ngi.static_ip_range_low + ngi.save() + StaticIPAddress.objects.allocate_new( + ngi.static_ip_range_high, ngi.static_ip_range_low) + + e = self.assertRaises(NodeActionError, StartNode(node, user).execute) + self.expectThat( + e.message, Equals( + "%s: Failed to start, static IP addresses are exhausted." 
% + node.hostname)) self.assertEqual(NODE_STATUS.ALLOCATED, node.status) + + def test_StartNode_requires_edit_permission(self): + user = factory.make_User() + node = factory.make_Node() + self.assertFalse( + user.has_perm(NODE_PERMISSION.EDIT, node)) + self.assertFalse(StartNode(node, user).is_permitted()) + + def test_StartNode_allocates_node_if_node_not_already_allocated(self): + user = factory.make_User() + node = factory.make_Node(status=NODE_STATUS.READY) + self.patch(node, 'start') + action = StartNode(node, user) + action.execute() + self.assertEqual(user, node.owner) - self.assertEqual( - 'provisioningserver.tasks.power_on', - self.celery.tasks[0]['task'].name) + self.assertEqual(NODE_STATUS.ALLOCATED, node.status) + + def test_StartNode_label_shows_allocate_if_unallocated(self): + user = factory.make_User() + node = factory.make_Node(status=NODE_STATUS.READY) + self.patch(node, 'start') + action = StartNode(node, user) + self.assertEqual("Acquire and start node", action.display) + + def test_StartNode_label_hides_allocate_if_allocated(self): + user = factory.make_User() + node = factory.make_Node(status=NODE_STATUS.READY) + self.patch(node, 'start') + node.acquire(user) + action = StartNode(node, user) + self.assertEqual("Start node", action.display) + + def test_StartNode_label_hides_acquire_for_non_owner_admin(self): + user = factory.make_User() + admin = factory.make_admin() + node = factory.make_Node(status=NODE_STATUS.READY) + node.acquire(user) + action = StartNode(node, admin) + self.assertEqual("Start node", action.display) + + def test_StartNode_does_not_reallocate_when_run_by_non_owner(self): + user = factory.make_User() + admin = factory.make_admin() + node = factory.make_Node(status=NODE_STATUS.READY) + self.patch(node, 'start') + node.acquire(user) + action = StartNode(node, admin) + + # This action.execute() will not fail because the non-owner is + # an admin, so they can start the node. 
Even if they weren't an + # admin, the node still wouldn't start; Node.start() would + # ignore it. + action.execute() + self.assertEqual(user, node.owner) + self.assertEqual(NODE_STATUS.ALLOCATED, node.status) class TestStopNodeNodeAction(MAASServerTestCase): - def test_StopNode_stops_and_releases_node(self): - self.patch(PowerAction, 'run_shell', lambda *args, **kwargs: ('', '')) - user = factory.make_user() + def test__stops_deployed_node(self): + user = factory.make_User() params = dict( - power_address=factory.getRandomString(), - power_user=factory.getRandomString(), - power_pass=factory.getRandomString()) - node = factory.make_node( - mac=True, status=NODE_STATUS.ALLOCATED, + power_address=factory.make_string(), + power_user=factory.make_string(), + power_pass=factory.make_string()) + node = factory.make_Node( + mac=True, status=NODE_STATUS.DEPLOYED, power_type='ipmi', owner=user, power_parameters=params) + self.patch(node, 'start_transition_monitor') + node_stop = self.patch_autospec(node, 'stop') + StopNode(node, user).execute() - self.assertEqual(NODE_STATUS.READY, node.status) - self.assertIsNone(node.owner) - self.assertEqual( - 'provisioningserver.tasks.power_off', - self.celery.tasks[0]['task'].name) + self.assertThat(node_stop, MockCalledOnceWith(user)) + + def test__stops_Ready_node(self): + admin = factory.make_admin() + params = dict( + power_address=factory.make_string(), + power_user=factory.make_string(), + power_pass=factory.make_string()) + node = factory.make_Node( + mac=True, status=NODE_STATUS.READY, + power_type='ipmi', power_parameters=params) + node_stop = self.patch_autospec(node, 'stop') + + StopNode(node, admin).execute() + self.assertThat(node_stop, MockCalledOnceWith(admin)) -def make_use_fastpath_installer_tag_with_expression(): - Tag.objects.get_or_create( - name="use-fastpath-installer", definition="true()") + def test__actionnable_for_failed_states(self): + status = random.choice(FAILED_STATUSES) + node = 
factory.make_Node(status=status, power_type='ipmi') + actions = compile_node_actions( + node, factory.make_admin(), classes=[StopNode]) + self.assertItemsEqual([StopNode.name], actions) + + +ACTIONABLE_STATUSES = [ + NODE_STATUS.DEPLOYING, + NODE_STATUS.FAILED_DEPLOYMENT, + NODE_STATUS.FAILED_DISK_ERASING, +] + + +class TestReleaseNodeNodeAction(MAASServerTestCase): + + scenarios = [ + (NODE_STATUS_CHOICES_DICT[status], dict(actionable_status=status)) + for status in ACTIONABLE_STATUSES + ] + + def test_ReleaseNode_stops_and_releases_node(self): + user = factory.make_User() + params = dict( + power_address=factory.make_string(), + power_user=factory.make_string(), + power_pass=factory.make_string()) + node = factory.make_Node( + mac=True, status=self.actionable_status, + power_type='ipmi', power_state=POWER_STATE.ON, + owner=user, power_parameters=params) + self.patch(node, 'start_transition_monitor') + node_stop = self.patch_autospec(node, 'stop') + + ReleaseNode(node, user).execute() + + self.expectThat(node.status, Equals(NODE_STATUS.RELEASING)) + self.assertThat( + node_stop, MockCalledOnceWith(user)) class TestUseCurtinNodeAction(MAASServerTestCase): - def test_sets_tag(self): - user = factory.make_user() - node = factory.make_node(owner=user) - node.use_traditional_installer() + def test_sets_boot_type(self): + user = factory.make_User() + node = factory.make_Node(owner=user, boot_type=NODE_BOOT.DEBIAN) action = UseCurtin(node, user) self.assertTrue(action.is_permitted()) action.execute() - self.assertTrue(node.should_use_fastpath_installer()) + self.assertEqual(NODE_BOOT.FASTPATH, node.boot_type) def test_requires_edit_permission(self): - user = factory.make_user() - node = factory.make_node() - node.use_traditional_installer() + user = factory.make_User() + node = factory.make_Node(boot_type=NODE_BOOT.DEBIAN) self.assertFalse(UseCurtin(node, user).is_permitted()) def test_not_permitted_if_already_uses_curtin(self): - node = factory.make_node() - 
node.use_fastpath_installer() + node = factory.make_Node(boot_type=NODE_BOOT.FASTPATH) user = factory.make_admin() self.assertFalse(UseCurtin(node, user).is_permitted()) - def test_inhibited_if_use_fastpath_installer_tag_uses_expr(self): - make_use_fastpath_installer_tag_with_expression() - node = factory.make_node() - user = factory.make_admin() - self.assertDocTestMatches( - """\ - The use-fastpath-installer tag is defined with an - expression. This expression must instead be updated to set - this node to install with the fast installer. - """, - UseCurtin(node, user).inhibit()) - class TestUseDINodeAction(MAASServerTestCase): - def test_sets_tag(self): - user = factory.make_user() - node = factory.make_node(owner=user) - node.use_fastpath_installer() + def test_sets_boot_type(self): + user = factory.make_User() + node = factory.make_Node(owner=user, boot_type=NODE_BOOT.FASTPATH) action = UseDI(node, user) self.assertTrue(action.is_permitted()) action.execute() - self.assertTrue(node.should_use_traditional_installer()) + self.assertEqual(NODE_BOOT.DEBIAN, node.boot_type) def test_requires_edit_permission(self): - user = factory.make_user() - node = factory.make_node() - node.use_fastpath_installer() + user = factory.make_User() + node = factory.make_Node(boot_type=NODE_BOOT.FASTPATH) self.assertFalse(UseDI(node, user).is_permitted()) def test_not_permitted_if_already_uses_di(self): - node = factory.make_node() - node.use_traditional_installer() + node = factory.make_Node(boot_type=NODE_BOOT.DEBIAN) user = factory.make_admin() self.assertFalse(UseDI(node, user).is_permitted()) - def test_inhibited_if_use_fastpath_installer_tag_uses_expr(self): - make_use_fastpath_installer_tag_with_expression() - node = factory.make_node() + +class TestMarkBrokenAction(MAASServerTestCase): + + def test_changes_status(self): + user = factory.make_User() + node = factory.make_Node(owner=user, status=NODE_STATUS.COMMISSIONING) + action = MarkBroken(node, user) + 
self.assertTrue(action.is_permitted()) + action.execute() + self.assertEqual(NODE_STATUS.BROKEN, reload_object(node).status) + + def test_updates_error_description(self): + user = factory.make_User() + node = factory.make_Node(owner=user, status=NODE_STATUS.COMMISSIONING) + action = MarkBroken(node, user) + self.assertTrue(action.is_permitted()) + action.execute() + self.assertEqual( + "Manually marked as broken by user '%s'" % user.username, + reload_object(node).error_description + ) + + def test_requires_edit_permission(self): + user = factory.make_User() + node = factory.make_Node() + self.assertFalse(MarkBroken(node, user).is_permitted()) + + +class TestMarkFixedAction(MAASServerTestCase): + + def test_changes_status(self): + node = factory.make_Node(status=NODE_STATUS.BROKEN) user = factory.make_admin() - self.assertDocTestMatches( - """\ - The use-fastpath-installer tag is defined with an - expression. This expression must instead be updated to set - this node to install with the default installer. - """, - UseDI(node, user).inhibit()) + action = MarkFixed(node, user) + self.assertTrue(action.is_permitted()) + action.execute() + self.assertEqual(NODE_STATUS.READY, reload_object(node).status) + + def test_requires_admin_permission(self): + user = factory.make_User() + node = factory.make_Node() + self.assertFalse(MarkFixed(node, user).is_permitted()) + + def test_not_enabled_if_not_broken(self): + status = factory.pick_choice( + NODE_STATUS_CHOICES, but_not=[NODE_STATUS.BROKEN]) + node = factory.make_Node(status=status) + actions = compile_node_actions( + node, factory.make_admin(), classes=[MarkFixed]) + self.assertItemsEqual([], actions) + + +class TestActionsErrorHandling(MAASServerTestCase): + """Tests for error handling in actions. + + This covers RPC exceptions and `ExternalProcessError`s. 
+ """ + exceptions = RPC_EXCEPTIONS + (ExternalProcessError,) + scenarios = [ + (exception_class.__name__, {"exception_class": exception_class}) + for exception_class in exceptions + ] + + def make_exception(self): + if self.exception_class is MultipleFailures: + exception = self.exception_class( + Failure(Exception(factory.make_name("exception")))) + elif self.exception_class is ExternalProcessError: + exception = self.exception_class( + 1, ["cmd"], factory.make_name("exception")) + else: + exception = self.exception_class(factory.make_name("exception")) + return exception + + def patch_rpc_methods(self, node): + exception = self.make_exception() + self.patch(node, 'start').side_effect = exception + self.patch(node, 'stop').side_effect = exception + self.patch_autospec(node, 'start_transition_monitor') + self.patch_autospec(node, 'stop_transition_monitor') + + def make_action(self, action_class, node_status): + node = factory.make_Node( + mac=True, status=node_status, power_type='ether_wake') + admin = factory.make_admin() + return action_class(node, admin) + + def test_Commission_handles_rpc_errors(self): + action = self.make_action(Commission, NODE_STATUS.READY) + self.patch_rpc_methods(action.node) + exception = self.assertRaises(NodeActionError, action.execute) + self.assertEqual( + get_error_message_for_exception( + action.node.start.side_effect), + unicode(exception)) + + def test_AbortCommissioning_handles_rpc_errors(self): + action = self.make_action( + AbortCommissioning, NODE_STATUS.COMMISSIONING) + self.patch_rpc_methods(action.node) + exception = self.assertRaises(NodeActionError, action.execute) + self.assertEqual( + get_error_message_for_exception( + action.node.stop.side_effect), + unicode(exception)) + + def test_AbortOperation_handles_rpc_errors(self): + action = self.make_action( + AbortOperation, NODE_STATUS.DISK_ERASING) + self.patch_rpc_methods(action.node) + exception = self.assertRaises(NodeActionError, action.execute) + self.assertEqual( + 
get_error_message_for_exception( + action.node.stop.side_effect), + unicode(exception)) + + def test_StartNode_handles_rpc_errors(self): + action = self.make_action(StartNode, NODE_STATUS.READY) + self.patch_rpc_methods(action.node) + exception = self.assertRaises(NodeActionError, action.execute) + self.assertEqual( + get_error_message_for_exception( + action.node.start.side_effect), + unicode(exception)) + + def test_StopNode_handles_rpc_errors(self): + action = self.make_action(StopNode, NODE_STATUS.DEPLOYED) + self.patch_rpc_methods(action.node) + exception = self.assertRaises(NodeActionError, action.execute) + self.assertEqual( + get_error_message_for_exception( + action.node.stop.side_effect), + unicode(exception)) + + def test_ReleaseNode_handles_rpc_errors(self): + action = self.make_action(ReleaseNode, NODE_STATUS.ALLOCATED) + self.patch_rpc_methods(action.node) + exception = self.assertRaises(NodeActionError, action.execute) + self.assertEqual( + get_error_message_for_exception( + action.node.stop.side_effect), + unicode(exception)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_node_constraint_filter_forms.py maas-1.7.6+bzr3376/src/maasserver/tests/test_node_constraint_filter_forms.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_node_constraint_filter_forms.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_node_constraint_filter_forms.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,6 +14,8 @@ __metaclass__ = type __all__ = [] +from random import randint + from django import forms from django.core.exceptions import ValidationError from maasserver.fields import MAC @@ -99,7 +101,7 @@ self.assertEqual([], detect_nonexistent_zone_names([])) def test_detect_nonexistent_zone_names_returns_empty_if_all_OK(self): - zones = [factory.make_zone() for _ in range(3)] + zones = [factory.make_Zone() for _ in range(3)] self.assertEqual( [], detect_nonexistent_zone_names([zone.name for zone in zones])) @@ -115,7 +117,7 @@ 
detect_nonexistent_zone_names(names)) def test_detect_nonexistent_zone_names_combines_good_and_bad_names(self): - zone = factory.make_zone().name + zone = factory.make_Zone().name non_zone = factory.make_name('nonzone') self.assertEqual( [non_zone], @@ -160,6 +162,12 @@ class TestAcquireNodeForm(MAASServerTestCase): + def set_usable_arch(self): + """Produce an arbitrary, valid, architecture name.""" + arch = '%s/%s' % (factory.make_name('arch'), factory.make_name('sub')) + patch_usable_architectures(self, [arch]) + return arch + def test_strict_form_checks_unknown_constraints(self): data = {'unknown_constraint': 'boo'} form = AcquireNodeForm.Strict(data=data) @@ -178,18 +186,18 @@ self.assertItemsEqual(nodes, form.filter_nodes(Node.objects.all())) def test_no_constraints(self): - nodes = [factory.make_node() for i in range(3)] + nodes = [factory.make_Node() for _ in range(3)] form = AcquireNodeForm(data={}) self.assertTrue(form.is_valid()) self.assertItemsEqual(nodes, Node.objects.all()) def test_hostname(self): - nodes = [factory.make_node() for i in range(3)] + nodes = [factory.make_Node() for _ in range(3)] self.assertConstrainedNodes([nodes[0]], {'name': nodes[0].hostname}) self.assertConstrainedNodes([], {'name': 'unknown-name'}) def test_hostname_with_domain_part(self): - nodes = [factory.make_node() for i in range(3)] + nodes = [factory.make_Node() for _ in range(3)] self.assertConstrainedNodes( [nodes[0]], {'name': '%s.%s' % (nodes[0].hostname, nodes[0].nodegroup.name)}) @@ -199,7 +207,7 @@ self.assertConstrainedNodes( [], {'name': '%s.%s' % (nodes[0].hostname, nodes[1].nodegroup.name)}) - node = factory.make_node(hostname="host21.mydomain") + node = factory.make_Node(hostname="host21.mydomain") self.assertConstrainedNodes( [node], {'name': 'host21.mydomain'}) @@ -209,8 +217,8 @@ {'name': 'host21.%s' % node.nodegroup.name}) def test_cpu_count(self): - node1 = factory.make_node(cpu_count=1) - node2 = factory.make_node(cpu_count=2) + node1 = 
factory.make_Node(cpu_count=1) + node2 = factory.make_Node(cpu_count=2) nodes = [node1, node2] self.assertConstrainedNodes(nodes, {'cpu_count': '0'}) self.assertConstrainedNodes(nodes, {'cpu_count': '1.0'}) @@ -224,8 +232,8 @@ (form.is_valid(), form.errors)) def test_memory(self): - node1 = factory.make_node(memory=1024) - node2 = factory.make_node(memory=4096) + node1 = factory.make_Node(memory=1024) + node2 = factory.make_Node(memory=4096) self.assertConstrainedNodes([node1, node2], {'mem': '512'}) self.assertConstrainedNodes([node1, node2], {'mem': '1024'}) self.assertConstrainedNodes([node2], {'mem': '2048'}) @@ -236,13 +244,13 @@ def test_invalid_memory(self): form = AcquireNodeForm(data={'mem': 'invalid'}) self.assertEquals( - (False, {'mem': ["Invalid memory: number of MB required."]}), + (False, {'mem': ["Invalid memory: number of MiB required."]}), (form.is_valid(), form.errors)) def test_networks_filters_by_name(self): - networks = factory.make_networks(5) + networks = factory.make_Networks(5) macs = [ - factory.make_mac_address(networks=[network]) + factory.make_MACAddress_with_Node(networks=[network]) for network in networks ] # Filter for this network. Take one in the middle to avoid @@ -253,9 +261,9 @@ {'networks': [networks[pick].name]}) def test_networks_filters_by_ip(self): - networks = factory.make_networks(5) + networks = factory.make_Networks(5) macs = [ - factory.make_mac_address(networks=[network]) + factory.make_MACAddress_with_Node(networks=[network]) for network in networks ] # Filter for this network. Take one in the middle to avoid @@ -267,9 +275,9 @@ def test_networks_filters_by_vlan_tag(self): vlan_tags = list(range(5)) - networks = [factory.make_network(vlan_tag=tag) for tag in vlan_tags] + networks = [factory.make_Network(vlan_tag=tag) for tag in vlan_tags] macs = [ - factory.make_mac_address(networks=[network]) + factory.make_MACAddress_with_Node(networks=[network]) for network in networks ] # Filter for this network. 
Take one in the middle to avoid @@ -280,15 +288,15 @@ {'networks': ['vlan:%d' % vlan_tags[pick]]}) def test_networks_filter_ignores_macs_on_other_networks(self): - network = factory.make_network() - node = factory.make_node() - factory.make_mac_address(node=node, networks=[network]) - factory.make_mac_address(node=node, networks=[factory.make_network()]) + network = factory.make_Network() + node = factory.make_Node() + factory.make_MACAddress(node=node, networks=[network]) + factory.make_MACAddress(node=node, networks=[factory.make_Network()]) self.assertConstrainedNodes({node}, {'networks': [network.name]}) def test_networks_filter_ignores_other_networks_on_mac(self): - networks = factory.make_networks(3) - mac = factory.make_mac_address(networks=networks) + networks = factory.make_Networks(3) + mac = factory.make_MACAddress_with_Node(networks=networks) self.assertConstrainedNodes( {mac.node}, {'networks': [networks[1].name]}) @@ -314,7 +322,7 @@ (form.is_valid(), form.errors)) def test_networks_combines_filters(self): - networks = factory.make_networks(3) + networks = factory.make_Networks(3) [ network_by_name, network_by_ip, @@ -326,12 +334,15 @@ but_not=[network.vlan_tag for network in networks]) network_by_vlan.save() - factory.make_mac_address(networks=[network_by_name, network_by_ip]) - factory.make_mac_address(networks=[network_by_name, network_by_vlan]) - right_mac = factory.make_mac_address( + factory.make_MACAddress_with_Node( + networks=[network_by_name, network_by_ip]) + factory.make_MACAddress_with_Node( + networks=[network_by_name, network_by_vlan]) + right_mac = factory.make_MACAddress_with_Node( networks=[network_by_name, network_by_ip, network_by_vlan]) - factory.make_mac_address(networks=[network_by_ip, network_by_vlan]) - factory.make_mac_address(networks=[]) + factory.make_MACAddress_with_Node( + networks=[network_by_ip, network_by_vlan]) + factory.make_MACAddress_with_Node(networks=[]) self.assertConstrainedNodes( {right_mac.node}, @@ -344,17 
+355,17 @@ }) def test_networks_ignores_other_networks(self): - [this_network, other_network] = factory.make_networks(2) - mac = factory.make_mac_address( + [this_network, other_network] = factory.make_Networks(2) + mac = factory.make_MACAddress_with_Node( networks=[this_network, other_network]) self.assertConstrainedNodes( [mac.node], {'networks': [this_network.name]}) def test_not_networks_filters_by_name(self): - networks = factory.make_networks(2) + networks = factory.make_Networks(2) macs = [ - factory.make_mac_address(networks=[network]) + factory.make_MACAddress_with_Node(networks=[network]) for network in networks ] self.assertConstrainedNodes( @@ -362,9 +373,9 @@ {'not_networks': [networks[1].name]}) def test_not_networks_filters_by_ip(self): - networks = factory.make_networks(2) + networks = factory.make_Networks(2) macs = [ - factory.make_mac_address(networks=[network]) + factory.make_MACAddress_with_Node(networks=[network]) for network in networks ] self.assertConstrainedNodes( @@ -373,9 +384,9 @@ def test_not_networks_filters_by_vlan_tag(self): vlan_tags = range(2) - networks = [factory.make_network(vlan_tag=tag) for tag in vlan_tags] + networks = [factory.make_Network(vlan_tag=tag) for tag in vlan_tags] macs = [ - factory.make_mac_address(networks=[network]) + factory.make_MACAddress_with_Node(networks=[network]) for network in networks ] self.assertConstrainedNodes( @@ -383,23 +394,23 @@ {'not_networks': ['vlan:%d' % vlan_tags[1]]}) def test_not_networks_accepts_nodes_without_network_connections(self): - macless_node = factory.make_node() - unconnected_mac = factory.make_mac_address(networks=[]) + macless_node = factory.make_Node() + unconnected_mac = factory.make_MACAddress_with_Node(networks=[]) self.assertConstrainedNodes( {macless_node, unconnected_mac.node}, - {'not_networks': [factory.make_network().name]}) + {'not_networks': [factory.make_Network().name]}) def test_not_networks_excludes_node_with_any_mac_on_not_networks(self): - network = 
factory.make_network() - node = factory.make_node() - factory.make_mac_address(node=node, networks=[network]) - factory.make_mac_address(node=node, networks=[factory.make_network()]) + network = factory.make_Network() + node = factory.make_Node() + factory.make_MACAddress(node=node, networks=[network]) + factory.make_MACAddress(node=node, networks=[factory.make_Network()]) self.assertConstrainedNodes([], {'not_networks': [network.name]}) def test_not_networks_excludes_node_with_mac_on_any_not_networks(self): - networks = factory.make_networks(3) + networks = factory.make_Networks(3) not_network = networks[1] - factory.make_mac_address(networks=[not_network]) + factory.make_MACAddress_with_Node(networks=[not_network]) self.assertConstrainedNodes([], {'not_networks': [not_network.name]}) def test_invalid_not_networks(self): @@ -423,7 +434,7 @@ (form.is_valid(), form.errors)) def test_not_networks_combines_filters(self): - networks = factory.make_networks(5) + networks = factory.make_Networks(5) [ network_by_name, network_by_ip, @@ -437,12 +448,16 @@ but_not=[network.vlan_tag for network in networks]) network_by_vlan.save() - factory.make_mac_address(networks=[network_by_name]) - factory.make_mac_address(networks=[network_by_name, network_by_ip]) - factory.make_mac_address(networks=[network_by_name, network_by_vlan]) - factory.make_mac_address(networks=[network_by_vlan]) - factory.make_mac_address(networks=[network_by_vlan, other_network]) - right_mac = factory.make_mac_address(networks=[remaining_network]) + factory.make_MACAddress_with_Node(networks=[network_by_name]) + factory.make_MACAddress_with_Node( + networks=[network_by_name, network_by_ip]) + factory.make_MACAddress_with_Node( + networks=[network_by_name, network_by_vlan]) + factory.make_MACAddress_with_Node(networks=[network_by_vlan]) + factory.make_MACAddress_with_Node( + networks=[network_by_vlan, other_network]) + right_mac = factory.make_MACAddress_with_Node( + networks=[remaining_network]) 
self.assertConstrainedNodes( {right_mac.node}, @@ -457,9 +472,9 @@ def test_connected_to(self): mac1 = MAC('aa:bb:cc:dd:ee:ff') mac2 = MAC('00:11:22:33:44:55') - node1 = factory.make_node(routers=[mac1, mac2]) - node2 = factory.make_node(routers=[mac1]) - factory.make_node() + node1 = factory.make_Node(routers=[mac1, mac2]) + node2 = factory.make_Node(routers=[mac1]) + factory.make_Node() self.assertConstrainedNodes( [node1], {'connected_to': [ mac1.get_raw(), mac2.get_raw()]}) @@ -477,9 +492,9 @@ def test_not_connected_to(self): mac1 = MAC('aa:bb:cc:dd:ee:ff') mac2 = MAC('00:11:22:33:44:55') - node1 = factory.make_node(routers=[mac1, mac2]) - node2 = factory.make_node(routers=[mac1]) - node3 = factory.make_node() + node1 = factory.make_Node(routers=[mac1, mac2]) + node2 = factory.make_Node(routers=[mac1]) + node3 = factory.make_Node() self.assertConstrainedNodes( [node3], {'not_connected_to': [ mac1.get_raw(), mac2.get_raw()]}) @@ -497,11 +512,11 @@ (form.is_valid(), form.errors)) def test_zone(self): - node1 = factory.make_node() - node2 = factory.make_node() - node3 = factory.make_node() - zone1 = factory.make_zone(nodes=[node1, node2]) - zone2 = factory.make_zone() + node1 = factory.make_Node() + node2 = factory.make_Node() + node3 = factory.make_Node() + zone1 = factory.make_Zone(nodes=[node1, node2]) + zone2 = factory.make_Zone() self.assertConstrainedNodes( [node1, node2], {'zone': zone1.name}) @@ -519,15 +534,15 @@ (form.is_valid(), form.errors)) def test_not_in_zone_excludes_given_zones(self): - ineligible_nodes = [factory.make_node() for _ in range(2)] - eligible_nodes = [factory.make_node() for _ in range(2)] + ineligible_nodes = [factory.make_Node() for _ in range(2)] + eligible_nodes = [factory.make_Node() for _ in range(2)] self.assertConstrainedNodes( eligible_nodes, {'not_in_zone': [node.zone.name for node in ineligible_nodes]}) def test_not_in_zone_with_required_zone_yields_no_nodes(self): - zone = factory.make_zone() - factory.make_node(zone=zone) 
+ zone = factory.make_Zone() + factory.make_Node(zone=zone) self.assertConstrainedNodes([], {'zone': zone, 'not_in_zone': [zone]}) def test_validates_not_in_zone(self): @@ -548,19 +563,19 @@ # Three nodes, all in different physical zones. If we say we don't # want the first node's zone or the second node's zone, we get the node # in the remaining zone. - nodes = [factory.make_node() for _ in range(3)] + nodes = [factory.make_Node() for _ in range(3)] self.assertConstrainedNodes( [nodes[2]], {'not_in_zone': [nodes[0].zone.name, nodes[1].zone.name]}) def test_tags(self): - tag_big = factory.make_tag(name='big') - tag_burly = factory.make_tag(name='burly') - node_big = factory.make_node() + tag_big = factory.make_Tag(name='big') + tag_burly = factory.make_Tag(name='burly') + node_big = factory.make_Node() node_big.tags.add(tag_big) - node_burly = factory.make_node() + node_burly = factory.make_Node() node_burly.tags.add(tag_burly) - node_bignburly = factory.make_node() + node_bignburly = factory.make_Node() node_bignburly.tags.add(tag_big) node_bignburly.tags.add(tag_burly) self.assertConstrainedNodes( @@ -570,6 +585,30 @@ self.assertConstrainedNodes( [node_bignburly], {'tags': ['big', 'burly']}) + def test_not_tags_negates_individual_tags(self): + tag = factory.make_Tag() + tagged_node = factory.make_Node() + tagged_node.tags.add(tag) + untagged_node = factory.make_Node() + + self.assertConstrainedNodes( + [untagged_node], {'not_tags': [tag.name]}) + + def test_not_tags_negates_multiple_tags(self): + tagged_node = factory.make_Node() + tags = [ + factory.make_Tag('spam'), + factory.make_Tag('eggs'), + factory.make_Tag('ham'), + ] + tagged_node.tags = tags + partially_tagged_node = factory.make_Node() + partially_tagged_node.tags.add(tags[0]) + + self.assertConstrainedNodes( + [partially_tagged_node], + {'not_tags': ['eggs', 'ham']}) + def test_invalid_tags(self): form = AcquireNodeForm(data={'tags': ['big', 'unknown']}) self.assertEquals( @@ -579,15 +618,15 @@ 
(form.is_valid(), form.errors)) def test_combined_constraints(self): - tag_big = factory.make_tag(name='big') + tag_big = factory.make_Tag(name='big') arch = '%s/generic' % factory.make_name('arch') wrong_arch = '%s/generic' % factory.make_name('arch') patch_usable_architectures(self, [arch, wrong_arch]) - node_big = factory.make_node(architecture=arch) + node_big = factory.make_Node(architecture=arch) node_big.tags.add(tag_big) - node_small = factory.make_node(architecture=arch) + node_small = factory.make_Node(architecture=arch) ignore_unused(node_small) - node_big_other_arch = factory.make_node(architecture=wrong_arch) + node_big_other_arch = factory.make_Node(architecture=wrong_arch) node_big_other_arch.tags.add(tag_big) self.assertConstrainedNodes( [node_big, node_big_other_arch], {'tags': ['big']}) @@ -600,18 +639,88 @@ self.assertEquals( (False, { 'tags': ["No such tag(s): 'unknown'."], - 'mem': ["Invalid memory: number of MB required."], + 'mem': ["Invalid memory: number of MiB required."], }), (form.is_valid(), form.errors)) def test_returns_distinct_nodes(self): - network = factory.make_network() - node = factory.make_node() + network = factory.make_Network() + node = factory.make_Node() # Create multiple NICs for `node` connected to `network`. 
[ - factory.make_mac_address(node=node, networks=[network]) + factory.make_MACAddress(node=node, networks=[network]) for _ in range(3) ] self.assertConstrainedNodes( {node}, {'networks': [network.name]}) + + def test_describe_constraints_returns_empty_if_no_constraints(self): + form = AcquireNodeForm(data={}) + self.assertTrue(form.is_valid(), form.errors) + self.assertEqual('', form.describe_constraints()) + + def test_describe_constraints_shows_simple_constraint(self): + hostname = factory.make_name('host') + form = AcquireNodeForm(data={'name': hostname}) + self.assertTrue(form.is_valid(), form.errors) + self.assertEqual('name=%s' % hostname, form.describe_constraints()) + + def test_describe_constraints_shows_arch_as_special_case(self): + # The "arch" field is technically a single-valued string field + # on the form, but its "cleaning" produces a list of strings. + arch = self.set_usable_arch() + form = AcquireNodeForm(data={'arch': arch}) + self.assertTrue(form.is_valid(), form.errors) + self.assertEqual('arch=%s' % arch, form.describe_constraints()) + + def test_describe_constraints_shows_multi_constraint(self): + tag = factory.make_Tag() + form = AcquireNodeForm(data={'tags': [tag.name]}) + self.assertTrue(form.is_valid(), form.errors) + self.assertEqual('tags=%s' % tag.name, form.describe_constraints()) + + def test_describe_constraints_sorts_constraints(self): + hostname = factory.make_name('host') + zone = factory.make_Zone() + form = AcquireNodeForm(data={'name': hostname, 'zone': zone}) + self.assertTrue(form.is_valid(), form.errors) + self.assertEqual( + 'name=%s zone=%s' % (hostname, zone), + form.describe_constraints()) + + def test_describe_constraints_combines_constraint_values(self): + tag1 = factory.make_Tag() + tag2 = factory.make_Tag() + form = AcquireNodeForm(data={'tags': [tag1.name, tag2.name]}) + self.assertTrue(form.is_valid(), form.errors) + self.assertEqual( + 'tags=%s,%s' % tuple(sorted([tag1.name, tag2.name])), + 
form.describe_constraints()) + + def test_describe_constraints_shows_all_constraints(self): + constraints = { + 'name': factory.make_name('host'), + 'arch': self.set_usable_arch(), + 'cpu_count': randint(1, 32), + 'mem': randint(1024, 256 * 1024), + 'tags': [factory.make_Tag().name], + 'not_tags': [factory.make_Tag().name], + 'networks': [factory.make_Network().name], + 'not_networks': [factory.make_Network().name], + 'connected_to': [factory.make_mac_address()], + 'not_connected_to': [factory.make_mac_address()], + 'zone': factory.make_Zone(), + 'not_in_zone': [factory.make_Zone().name], + } + form = AcquireNodeForm(data=constraints) + self.assertTrue(form.is_valid(), form.errors) + # Check first: we didn't forget to test any attributes. When we add + # a constraint to the form, we'll have to add it here as well. + self.assertItemsEqual(form.fields.keys(), constraints.keys()) + + described_constraints = { + constraint.split('=', 1)[0] + for constraint in form.describe_constraints().split() + } + self.assertItemsEqual(constraints.keys(), described_constraints) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_node_query.py maas-1.7.6+bzr3376/src/maasserver/tests/test_node_query.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_node_query.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_node_query.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,132 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for node power status query when state changes.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import random + +from maasserver.node_status import ( + get_failed_status, + NODE_STATUS, + ) +from maasserver.rpc.testing.fixtures import MockLiveRegionToClusterRPCFixture +from maasserver.testing.eventloop import ( + RegionEventLoopFixture, + RunningEventLoopFixture, + ) +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.testcase import MAASServerTestCase +from maastesting.matchers import ( + MockCalledOnceWith, + MockNotCalled, + ) +from provisioningserver.power.poweraction import PowerActionFail +from provisioningserver.rpc import cluster as cluster_module +from provisioningserver.rpc.testing import always_succeed_with +from twisted.internet.task import Clock + + +class TestStatusQueryEvent(MAASServerTestCase): + + def setUp(self): + super(TestStatusQueryEvent, self).setUp() + # Circular imports. 
+ from maasserver import node_query + self.node_query = node_query + + def test_changing_status_of_node_emits_event(self): + mock_update = self.patch( + self.node_query, 'wait_to_update_power_state_of_node') + old_status = NODE_STATUS.COMMISSIONING + node = factory.make_Node(status=old_status, power_type='virsh') + node.status = get_failed_status(old_status) + node.save() + self.assertThat( + mock_update, + MockCalledOnceWith(node.system_id)) + + def test_changing_not_tracked_status_of_node_doesnt_emit_event(self): + mock_update = self.patch( + self.node_query, "wait_to_update_power_state_of_node") + old_status = NODE_STATUS.ALLOCATED + node = factory.make_Node(status=old_status, power_type="virsh") + node.status = NODE_STATUS.DEPLOYING + node.save() + self.assertThat( + mock_update, + MockNotCalled()) + + +class TestWaitToUpdatePowerStateOfNode(MAASServerTestCase): + + def setUp(self): + super(TestWaitToUpdatePowerStateOfNode, self).setUp() + # Circular imports. + from maasserver import node_query + self.node_query = node_query + + def test__calls_update_power_state_of_node_after_wait_time(self): + mock_defer_to_thread = self.patch(self.node_query, 'deferToThread') + node = factory.make_Node(power_type="virsh") + clock = Clock() + self.node_query.wait_to_update_power_state_of_node( + node.system_id, clock=clock) + + self.expectThat(mock_defer_to_thread, MockNotCalled()) + clock.advance(self.node_query.WAIT_TO_QUERY.total_seconds()) + self.expectThat( + mock_defer_to_thread, + MockCalledOnceWith( + self.node_query.update_power_state_of_node, node.system_id)) + + +class TestUpdatePowerStateOfNode(MAASServerTestCase): + + def setUp(self): + super(TestUpdatePowerStateOfNode, self).setUp() + # Circular imports. 
+ from maasserver import node_query + self.node_query = node_query + + def prepare_rpc(self, nodegroup, side_effect): + self.useFixture(RegionEventLoopFixture("rpc")) + self.useFixture(RunningEventLoopFixture()) + self.rpc_fixture = self.useFixture(MockLiveRegionToClusterRPCFixture()) + protocol = self.rpc_fixture.makeCluster( + nodegroup, cluster_module.PowerQuery) + protocol.PowerQuery.side_effect = side_effect + + def test__updates_node_power_state(self): + node = factory.make_Node(power_type="virsh") + random_state = random.choice(["on", "off"]) + self.prepare_rpc( + node.nodegroup, + side_effect=always_succeed_with({"state": random_state})) + self.node_query.update_power_state_of_node(node.system_id) + self.assertEqual(random_state, reload_object(node).power_state) + + def test__handles_deleted_node(self): + node = factory.make_Node(power_type="virsh") + node.delete() + self.node_query.update_power_state_of_node(node.system_id) + #: Test is that no error is raised + + def test__updates_node_power_state_to_error_if_failure(self): + node = factory.make_Node(power_type="virsh") + self.prepare_rpc( + node.nodegroup, + side_effect=PowerActionFail()) + self.node_query.update_power_state_of_node(node.system_id) + self.assertEqual("error", reload_object(node).power_state) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_nonces_cleanup.py maas-1.7.6+bzr3376/src/maasserver/tests/test_nonces_cleanup.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_nonces_cleanup.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_nonces_cleanup.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2013 Canonical Ltd. This software is licensed under the +# Copyright 2013-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the nonces cleanup module.""" @@ -17,6 +17,7 @@ import time +from maasserver import nonces_cleanup from maasserver.nonces_cleanup import ( cleanup_old_nonces, create_checkpoint_nonce, @@ -24,15 +25,25 @@ find_checkpoint_nonce, get_time_string, key_prefix, + NonceCleanupService, time as module_time, timestamp_threshold, ) from maasserver.testing.testcase import MAASServerTestCase +from maastesting.factory import factory +from maastesting.matchers import ( + MockCalledOnceWith, + MockCallsMatch, + MockNotCalled, + ) +from mock import call from piston.models import Nonce from testtools.matchers import ( ContainsAll, StartsWith, ) +from twisted.internet.defer import maybeDeferred +from twisted.internet.task import Clock class TestCleanupOldNonces(MAASServerTestCase): @@ -46,11 +57,11 @@ # they were created now - timestamp_threshold seconds ago. timemod = self.patch(module_time, "time") timemod.return_value = now - timestamp_threshold - old_nonces = [Nonce.objects.create() for i in range(3)] + old_nonces = [Nonce.objects.create() for _ in range(3)] self.assertEquals(0, cleanup_old_nonces()) # Patch the module's time module back. timemod.return_value = now - new_nonces = [Nonce.objects.create() for i in range(3)] + new_nonces = [Nonce.objects.create() for _ in range(3)] cleanup_count = cleanup_old_nonces() @@ -84,9 +95,9 @@ def test_delete_old_nonces_delete_nonces(self): # Create old nonces. 
- [Nonce.objects.create() for i in range(3)] + [Nonce.objects.create() for _ in range(3)] checkpoint = Nonce.objects.create() - new_nonces = [Nonce.objects.create() for i in range(3)] + new_nonces = [Nonce.objects.create() for _ in range(3)] delete_old_nonces(checkpoint) self.assertItemsEqual(new_nonces, Nonce.objects.all()) @@ -119,3 +130,45 @@ def test_get_time_string_ends_with_suffix(self): now = time.time() self.assertThat(get_time_string(now), StartsWith(key_prefix)) + + +class TestNonceCleanupService(MAASServerTestCase): + + def test_init_with_default_interval(self): + # The service itself calls `cleanup_old_nonces` in a thread, via + # a couple of decorators. This indirection makes it clearer to + # mock `cleanup_old_nonces` here and track calls to it. + cleanup_old_nonces = self.patch(nonces_cleanup, "cleanup_old_nonces") + cleanup_old_nonces.__name__ = factory.make_string().encode("ascii") + # Making `deferToThread` use the current thread helps testing. + self.patch(nonces_cleanup, "deferToThread", maybeDeferred) + + service = NonceCleanupService() + # Use a deterministic clock instead of the reactor for testing. + service.clock = Clock() + + # The interval is stored as `step` by TimerService, + # NonceCleanupService's parent class. + interval = 24 * 60 * 60 # seconds. + self.assertEqual(service.step, interval) + + # `cleanup_old_nonces` is not called before the service is + # started. + self.assertThat(cleanup_old_nonces, MockNotCalled()) + # `cleanup_old_nonces` is called the moment the service is + # started. + service.startService() + self.assertThat(cleanup_old_nonces, MockCalledOnceWith()) + # Advancing the clock by `interval - 1` means that + # `cleanup_old_nonces` has still only been called once. + service.clock.advance(interval - 1) + self.assertThat(cleanup_old_nonces, MockCalledOnceWith()) + # Advancing the clock one more second causes another call to + # `cleanup_old_nonces`. 
+ service.clock.advance(1) + self.assertThat(cleanup_old_nonces, MockCallsMatch(call(), call())) + + def test_interval_can_be_set(self): + interval = self.getUniqueInteger() + service = NonceCleanupService(interval) + self.assertEqual(interval, service.step) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_populate_tags.py maas-1.7.6+bzr3376/src/maasserver/tests/test_populate_tags.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_populate_tags.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_populate_tags.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.populate_tags`.""" @@ -14,86 +14,301 @@ __metaclass__ = type __all__ = [] +from itertools import izip + +from fixtures import FakeLogger from maasserver import populate_tags as populate_tags_module +from maasserver.enum import NODEGROUP_STATUS from maasserver.models import Tag from maasserver.populate_tags import ( + _do_populate_tags, + _get_clients_for_populating_tags, populate_tags, populate_tags_for_single_node, - tag_nsmap, + ) +from maasserver.rpc.testing.fixtures import MockLiveRegionToClusterRPCFixture +from maasserver.testing.eventloop import ( + RegionEventLoopFixture, + RunningEventLoopFixture, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase +from maastesting.matchers import MockCalledOnceWith from metadataserver.models import commissioningscript -import mock +from mock import ( + ANY, + create_autospec, + sentinel, + ) +from provisioningserver.rpc.cluster import EvaluateTag +from provisioningserver.rpc.common import Client +from provisioningserver.rpc.testing import ( + always_fail_with, + always_succeed_with, + ) +from provisioningserver.utils.twisted 
import asynchronous +from testtools.deferredruntest import extract_result +from testtools.monkey import MonkeyPatcher +from twisted.internet import defer + + +def make_accepted_NodeGroup(): + return factory.make_NodeGroup(status=NODEGROUP_STATUS.ACCEPTED) + + +def make_Tag_without_populating(): + # Create a tag but prevent evaluation when saving. + dont_populate = MonkeyPatcher((Tag, "populate_nodes", lambda self: None)) + return dont_populate.run_with_patches(factory.make_Tag) + + +class TestGetClientsForPopulatingTags(MAASServerTestCase): + + def test__returns_no_clients_when_there_are_no_clusters(self): + tag_name = factory.make_name("tag") + clients = _get_clients_for_populating_tags([], tag_name) + self.assertEqual([], clients) + + def patch_getClientFor(self): + return self.patch_autospec(populate_tags_module, "getClientFor") + + def test__returns_no_clients_when_there_is_an_error(self): + nodegroup_with_connection = make_accepted_NodeGroup() + nodegroup_without_connection = make_accepted_NodeGroup() + + def getClientFor(uuid, timeout): + if uuid == nodegroup_with_connection.uuid: + return defer.succeed(sentinel.client) + else: + return defer.fail(ZeroDivisionError()) + self.patch_getClientFor().side_effect = getClientFor + + tag_name = factory.make_name("tag") + clusters = [ + (nodegroup_with_connection.uuid, + nodegroup_with_connection.cluster_name), + (nodegroup_without_connection.uuid, + nodegroup_without_connection.cluster_name), + ] + clients = _get_clients_for_populating_tags(clusters, tag_name) + self.assertEqual([sentinel.client], clients) + + def test__logs_errors_obtaining_clients(self): + getClientFor = self.patch_getClientFor() + getClientFor.side_effect = always_fail_with( + ZeroDivisionError("an error message one would surmise")) + nodegroup = make_accepted_NodeGroup() + tag_name = factory.make_name("tag") + clusters = [(nodegroup.uuid, nodegroup.cluster_name)] + with FakeLogger("maas") as log: + _get_clients_for_populating_tags(clusters, 
tag_name) + self.assertDocTestMatches( + "Cannot evaluate tag ... on cluster ... (...): ... surmise", + log.output) + + def test__waits_for_clients_for_30_seconds_by_default(self): + getClientFor = self.patch_getClientFor() + getClientFor.side_effect = always_succeed_with(sentinel.client) + nodegroup = make_accepted_NodeGroup() + tag_name = factory.make_name("tag") + clusters = [(nodegroup.uuid, nodegroup.cluster_name)] + clients = _get_clients_for_populating_tags(clusters, tag_name) + self.assertEqual([sentinel.client], clients) + self.assertThat( + getClientFor, MockCalledOnceWith( + nodegroup.uuid, timeout=30)) + + def test__obtains_multiple_clients(self): + getClientFor = self.patch_getClientFor() + # Return a 2-tuple as a stand-in for a real client. + getClientFor.side_effect = lambda uuid, timeout: ( + defer.succeed((sentinel.client, uuid))) + nodegroups = [make_accepted_NodeGroup() for _ in xrange(3)] + tag_name = factory.make_name("tag") + clusters = [(ng.uuid, ng.cluster_name) for ng in nodegroups] + clients = _get_clients_for_populating_tags(clusters, tag_name) + self.assertItemsEqual( + [(sentinel.client, nodegroup.uuid) for nodegroup in nodegroups], + clients) + + +class TestDoPopulateTags(MAASServerTestCase): + + def patch_clients(self, nodegroups): + clients = [create_autospec(Client, instance=True) for _ in nodegroups] + for nodegroup, client in izip(nodegroups, clients): + client.side_effect = always_succeed_with(None) + client.ident = nodegroup.uuid + + _get_clients = self.patch_autospec( + populate_tags_module, "_get_clients_for_populating_tags") + _get_clients.return_value = defer.succeed(clients) + + return clients + + def test__makes_calls_to_each_client_given(self): + nodegroups = [make_accepted_NodeGroup() for _ in xrange(3)] + clients = self.patch_clients(nodegroups) + + tag_name = factory.make_name("tag") + tag_definition = factory.make_name("definition") + tag_nsmap_prefix = factory.make_name("prefix") + tag_nsmap_uri = 
factory.make_name("uri") + tag_nsmap = {tag_nsmap_prefix: tag_nsmap_uri} + + clusters = list( + (ng.uuid, ng.cluster_name, ng.api_credentials) + for ng in nodegroups) + + [d] = _do_populate_tags( + clusters, tag_name, tag_definition, tag_nsmap) + + self.assertIsNone(extract_result(d)) + + for nodegroup, client in izip(nodegroups, clients): + self.expectThat(client, MockCalledOnceWith( + EvaluateTag, tag_name=tag_name, tag_definition=tag_definition, + tag_nsmap=[{"prefix": tag_nsmap_prefix, "uri": tag_nsmap_uri}], + credentials=nodegroup.api_credentials)) + + def test__logs_successes(self): + nodegroups = [make_accepted_NodeGroup()] + self.patch_clients(nodegroups) + + tag_name = factory.make_name("tag") + tag_definition = factory.make_name("definition") + tag_nsmap = {} + + clusters = list( + (ng.uuid, ng.cluster_name, ng.api_credentials) + for ng in nodegroups) + + with FakeLogger("maas") as log: + [d] = _do_populate_tags( + clusters, tag_name, tag_definition, tag_nsmap) + self.assertIsNone(extract_result(d)) + + self.assertDocTestMatches( + "Tag tag-... (definition-...) evaluated on cluster ... (...)", + log.output) + + def test__logs_failures(self): + nodegroups = [make_accepted_NodeGroup()] + [client] = self.patch_clients(nodegroups) + client.side_effect = always_fail_with( + ZeroDivisionError("splendid day for a spot of cricket")) + + tag_name = factory.make_name("tag") + tag_definition = factory.make_name("definition") + tag_nsmap = {} + + clusters = list( + (ng.uuid, ng.cluster_name, ng.api_credentials) + for ng in nodegroups) + + with FakeLogger("maas") as log: + [d] = _do_populate_tags( + clusters, tag_name, tag_definition, tag_nsmap) + self.assertIsNone(extract_result(d)) + + self.assertDocTestMatches( + "Tag tag-... (definition-...) could not be evaluated ... 
(...): " + "splendid day for a spot of cricket", log.output) class TestPopulateTags(MAASServerTestCase): - def test_populate_tags_task_routed_to_nodegroup_worker(self): - nodegroup = factory.make_node_group() - tag = factory.make_tag() - task = self.patch(populate_tags_module, 'update_node_tags') + def patch_do_populate_tags(self): + do_populate_tags = self.patch_autospec( + populate_tags_module, "_do_populate_tags") + do_populate_tags.return_value = [sentinel.d] + return do_populate_tags + + def test__calls_do_populate_tags_with_no_clusters(self): + do_populate_tags = self.patch_do_populate_tags() + tag = make_Tag_without_populating() populate_tags(tag) - args, kwargs = task.apply_async.call_args - self.assertEqual(nodegroup.work_queue, kwargs['queue']) + self.assertThat(do_populate_tags, MockCalledOnceWith( + (), tag.name, tag.definition, populate_tags_module.tag_nsmap)) - def test_populate_tags_task_routed_to_all_nodegroup_workers(self): - nodegroups = [factory.make_node_group() for i in range(5)] - tag = factory.make_tag() - refresh = self.patch(populate_tags_module, 'refresh_worker') - task = self.patch(populate_tags_module, 'update_node_tags') + def test__calls_do_populate_tags_with_clusters(self): + do_populate_tags = self.patch_do_populate_tags() + nodegroups = [make_accepted_NodeGroup() for _ in xrange(3)] + tag = make_Tag_without_populating() populate_tags(tag) - refresh_calls = [mock.call(nodegroup) for nodegroup in nodegroups] - refresh.assert_has_calls(refresh_calls, any_order=True) - task_calls = [ - mock.call( - queue=nodegroup.work_queue, - kwargs={ - 'tag_name': tag.name, - 'tag_definition': tag.definition, - 'tag_nsmap': tag_nsmap, - }, - ) - for nodegroup in nodegroups - ] - task.apply_async.assert_has_calls(task_calls, any_order=True) + clusters_expected = tuple( + (ng.uuid, ng.cluster_name, ng.api_credentials) + for ng in nodegroups) + self.assertThat(do_populate_tags, MockCalledOnceWith( + clusters_expected, tag.name, tag.definition, + 
populate_tags_module.tag_nsmap)) + + +class TestPopulateTagsEndToNearlyEnd(MAASServerTestCase): + + def prepare_live_rpc(self): + self.useFixture(RegionEventLoopFixture("rpc")) + self.useFixture(RunningEventLoopFixture()) + return self.useFixture(MockLiveRegionToClusterRPCFixture()) + + def test__calls_are_made_to_all_clusters(self): + rpc_fixture = self.prepare_live_rpc() + nodegroups = [make_accepted_NodeGroup() for _ in xrange(3)] + protocols = [] + for nodegroup in nodegroups: + protocol = rpc_fixture.makeCluster(nodegroup, EvaluateTag) + protocol.EvaluateTag.side_effect = always_succeed_with({}) + protocols.append(protocol) + tag = make_Tag_without_populating() + + d = populate_tags(tag) + + # `d` is a testing-only convenience. We must wait for it to fire, and + # we must do that from the reactor thread. + wait_for_populate = asynchronous(lambda: d) + wait_for_populate().wait(10) + + for nodegroup, protocol in izip(nodegroups, protocols): + self.expectThat(protocol.EvaluateTag, MockCalledOnceWith( + protocol, tag_name=tag.name, tag_definition=tag.definition, + tag_nsmap=ANY, credentials=nodegroup.api_credentials)) class TestPopulateTagsForSingleNode(MAASServerTestCase): def test_updates_node_with_all_applicable_tags(self): - node = factory.make_node() - factory.make_node_commission_result( + node = factory.make_Node() + factory.make_NodeResult_for_commissioning( node, commissioningscript.LSHW_OUTPUT_NAME, 0, b"") - factory.make_node_commission_result( + factory.make_NodeResult_for_commissioning( node, commissioningscript.LLDP_OUTPUT_NAME, 0, b"") tags = [ - factory.make_tag("foo", "/foo"), - factory.make_tag("bar", "//lldp:bar"), - factory.make_tag("baz", "/foo/bar"), + factory.make_Tag("foo", "/foo"), + factory.make_Tag("bar", "//lldp:bar"), + factory.make_Tag("baz", "/foo/bar"), ] populate_tags_for_single_node(tags, node) self.assertItemsEqual( ["foo", "bar"], [tag.name for tag in node.tags.all()]) def test_ignores_tags_with_unrecognised_namespaces(self): - 
node = factory.make_node() - factory.make_node_commission_result( + node = factory.make_Node() + factory.make_NodeResult_for_commissioning( node, commissioningscript.LSHW_OUTPUT_NAME, 0, b"") tags = [ - factory.make_tag("foo", "/foo"), - factory.make_tag("lou", "//nge:bar"), + factory.make_Tag("foo", "/foo"), + factory.make_Tag("lou", "//nge:bar"), ] populate_tags_for_single_node(tags, node) # Look mom, no exception! self.assertSequenceEqual( ["foo"], [tag.name for tag in node.tags.all()]) def test_ignores_tags_without_definition(self): - node = factory.make_node() - factory.make_node_commission_result( + node = factory.make_Node() + factory.make_NodeResult_for_commissioning( node, commissioningscript.LSHW_OUTPUT_NAME, 0, b"") tags = [ - factory.make_tag("foo", "/foo"), + factory.make_Tag("foo", "/foo"), Tag(name="empty", definition=""), Tag(name="null", definition=None), ] diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_preseed.py maas-1.7.6+bzr3376/src/maasserver/tests/test_preseed.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_preseed.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_preseed.py 2015-07-10 01:27:14.000000000 +0000 @@ -17,34 +17,47 @@ import httplib import os from pipes import quote +import random from urlparse import urlparse from django.conf import settings from django.core.urlresolvers import reverse +from maasserver import preseed as preseed_module +from maasserver.clusterrpc.testing.boot_images import make_rpc_boot_image from maasserver.enum import ( - DISTRO_SERIES, + NODE_BOOT, NODE_STATUS, NODEGROUPINTERFACE_MANAGEMENT, PRESEED_TYPE, ) -from maasserver.exceptions import MAASAPIException +from maasserver.exceptions import ( + ClusterUnavailable, + MissingBootImage, + PreseedError, + ) from maasserver.models import Config from maasserver.preseed import ( + compose_curtin_maas_reporter, + compose_curtin_network_preseed, compose_enlistment_preseed_url, compose_preseed_url, GENERIC_FILENAME, + 
get_available_purpose_for_node, get_curtin_config, get_curtin_context, + get_curtin_image, get_curtin_installer_url, get_curtin_userdata, get_enlist_preseed, - get_hostname_and_path, + get_netloc_and_path, get_node_preseed_context, get_preseed, get_preseed_context, get_preseed_filenames, get_preseed_template, get_preseed_type_for, + get_supported_purposes_for_node, + list_gateways_and_macs, load_preseed_template, pick_cluster_controller_address, PreseedTemplate, @@ -53,24 +66,54 @@ split_subarch, TemplateNotFoundError, ) +from maasserver.rpc.testing.mixins import PreseedRPCMixin from maasserver.testing.architecture import make_usable_architecture from maasserver.testing.factory import factory +from maasserver.testing.osystems import make_usable_osystem from maasserver.testing.testcase import MAASServerTestCase -from maasserver.utils import map_enum -from maasserver.utils.network import make_network +from maasserver.utils import absolute_reverse +from maastesting.matchers import MockCalledOnceWith +from metadataserver.models import NodeKey +from mock import ANY +from provisioningserver.drivers.osystem.ubuntu import UbuntuOS +from provisioningserver.rpc.exceptions import NoConnectionsAvailable +from provisioningserver.utils import locate_config +from provisioningserver.utils.enum import map_enum +from provisioningserver.utils.fs import read_text_file +from provisioningserver.utils.network import make_network from testtools.matchers import ( AllMatch, Contains, ContainsAll, + Equals, HasLength, IsInstance, MatchesAll, Not, StartsWith, ) +from twisted.internet import defer import yaml +class BootImageHelperMixin: + + def make_rpc_boot_image_for(self, node, purpose): + osystem = node.get_osystem() + series = node.get_distro_series() + arch, subarch = node.split_arch() + return make_rpc_boot_image( + osystem=osystem, release=series, + architecture=arch, subarchitecture=subarch, + purpose=purpose) + + def configure_get_boot_images_for_node(self, node, purpose): + 
boot_image = self.make_rpc_boot_image_for(node, purpose) + self.patch( + preseed_module, + 'get_boot_images_for').return_value = [boot_image] + + class TestSplitSubArch(MAASServerTestCase): """Tests for `split_subarch`.""" @@ -81,91 +124,149 @@ self.assertEqual(['amd64', 'test'], split_subarch('amd64/test')) -class TestGetHostnameAndPath(MAASServerTestCase): - """Tests for `get_hostname_and_path`.""" +class TestGetNetlocAndPath(MAASServerTestCase): + """Tests for `get_netloc_and_path`.""" - def test_get_hostname_and_path(self): + def test_get_netloc_and_path(self): input_and_results = [ + ('http://name.domain:66/my/path', ('name.domain:66', '/my/path')), + ('http://name.domain:80/my/path', ('name.domain:80', '/my/path')), ('http://name.domain/my/path', ('name.domain', '/my/path')), ('https://domain/path', ('domain', '/path')), + ('http://domain:12', ('domain:12', '')), ('http://domain/', ('domain', '/')), ('http://domain', ('domain', '')), ] inputs = [input for input, _ in input_and_results] results = [result for _, result in input_and_results] - self.assertEqual(results, map(get_hostname_and_path, inputs)) + self.assertEqual(results, map(get_netloc_and_path, inputs)) class TestGetPreseedFilenames(MAASServerTestCase): """Tests for `get_preseed_filenames`.""" - def test_get_preseed_filenames_returns_filenames(self): - hostname = factory.getRandomString() - prefix = factory.getRandomString() - release = factory.getRandomString() - node = factory.make_node(hostname=hostname) + def test__returns_filenames(self): + hostname = factory.make_string() + prefix = factory.make_string() + osystem = factory.make_string() + release = factory.make_string() + node = factory.make_Node(hostname=hostname) arch, subarch = node.architecture.split('/') self.assertSequenceEqual( [ - '%s_%s_%s_%s_%s' % (prefix, arch, subarch, release, hostname), - '%s_%s_%s_%s' % (prefix, arch, subarch, release), - '%s_%s_%s' % (prefix, arch, subarch), - '%s_%s' % (prefix, arch), + '%s_%s_%s_%s_%s_%s' % ( 
+ prefix, osystem, arch, subarch, release, hostname), + '%s_%s_%s_%s_%s' % (prefix, osystem, arch, subarch, release), + '%s_%s_%s_%s' % (prefix, osystem, arch, subarch), + '%s_%s_%s' % (prefix, osystem, arch), + '%s_%s' % (prefix, osystem), '%s' % prefix, 'generic', ], - list(get_preseed_filenames(node, prefix, release, default=True))) + list(get_preseed_filenames( + node, prefix, osystem, release, default=True))) - def test_get_preseed_filenames_if_node_is_None(self): - release = factory.getRandomString() - prefix = factory.getRandomString() + def test__returns_limited_filenames_if_node_is_None(self): + osystem = factory.make_string() + release = factory.make_string() + prefix = factory.make_string() self.assertSequenceEqual( [ - '%s_%s' % (prefix, release), + '%s_%s_%s' % (prefix, osystem, release), + '%s_%s' % (prefix, osystem), '%s' % prefix, ], - list(get_preseed_filenames(None, prefix, release))) + list(get_preseed_filenames(None, prefix, osystem, release))) - def test_get_preseed_filenames_supports_empty_prefix(self): - hostname = factory.getRandomString() - release = factory.getRandomString() - node = factory.make_node(hostname=hostname) + def test__supports_empty_prefix(self): + hostname = factory.make_string() + osystem = factory.make_string() + release = factory.make_string() + node = factory.make_Node(hostname=hostname) arch, subarch = node.architecture.split('/') self.assertSequenceEqual( [ - '%s_%s_%s_%s' % (arch, subarch, release, hostname), - '%s_%s_%s' % (arch, subarch, release), - '%s_%s' % (arch, subarch), - '%s' % arch, + '%s_%s_%s_%s_%s' % (osystem, arch, subarch, release, hostname), + '%s_%s_%s_%s' % (osystem, arch, subarch, release), + '%s_%s_%s' % (osystem, arch, subarch), + '%s_%s' % (osystem, arch), + '%s' % osystem, ], - list(get_preseed_filenames(node, '', release))) + list(get_preseed_filenames(node, '', osystem, release))) - def test_get_preseed_filenames_returns_list_without_default(self): + def 
test__returns_list_without_default(self): # If default=False is passed to get_preseed_filenames, the # returned list won't include the default template name as a # last resort template. - hostname = factory.getRandomString() - prefix = factory.getRandomString() - release = factory.getRandomString() - node = factory.make_node(hostname=hostname) + hostname = factory.make_string() + prefix = factory.make_string() + release = factory.make_string() + node = factory.make_Node(hostname=hostname) self.assertSequenceEqual( 'generic', list(get_preseed_filenames( node, prefix, release, default=True))[-1]) - def test_get_preseed_filenames_returns_list_with_default(self): + def test__returns_list_with_default(self): # If default=True is passed to get_preseed_filenames, the # returned list will include the default template name as a # last resort template. - hostname = factory.getRandomString() - prefix = factory.getRandomString() - release = factory.getRandomString() - node = factory.make_node(hostname=hostname) + hostname = factory.make_string() + prefix = factory.make_string() + release = factory.make_string() + node = factory.make_Node(hostname=hostname) self.assertSequenceEqual( prefix, list(get_preseed_filenames( node, prefix, release, default=False))[-1]) + def test__returns_backward_compatible_name_for_ubuntu_without_prefix(self): + # If the OS is Ubuntu, also include backward-compatible filenames. + # See bug 1439366 for details. 
+ hostname = factory.make_string() + osystem = UbuntuOS().name + release = factory.make_string() + node = factory.make_Node(hostname=hostname) + arch, subarch = node.architecture.split('/') + self.assertSequenceEqual( + [ + '%s_%s_%s_%s_%s' % (osystem, arch, subarch, release, hostname), + '%s_%s_%s_%s' % (arch, subarch, release, hostname), + '%s_%s_%s_%s' % (osystem, arch, subarch, release), + '%s_%s_%s' % (arch, subarch, release), + '%s_%s_%s' % (osystem, arch, subarch), + '%s_%s' % (arch, subarch), + '%s_%s' % (osystem, arch), + '%s' % (arch), + '%s' % osystem, + ], + list(get_preseed_filenames(node, '', osystem, release))) + + def test__returns_backward_compatible_name_for_ubuntu_with_prefix(self): + # If the OS is Ubuntu, also include backward-compatible filenames. + # See bug 1439366 for details. + hostname = factory.make_string() + osystem = UbuntuOS().name + release = factory.make_string() + node = factory.make_Node(hostname=hostname) + arch, subarch = node.architecture.split('/') + prefix = factory.make_string() + self.assertSequenceEqual( + [ + '%s_%s_%s_%s_%s_%s' % ( + prefix, osystem, arch, subarch, release, hostname), + '%s_%s_%s_%s_%s' % (prefix, arch, subarch, release, hostname), + '%s_%s_%s_%s_%s' % (prefix, osystem, arch, subarch, release), + '%s_%s_%s_%s' % (prefix, arch, subarch, release), + '%s_%s_%s_%s' % (prefix, osystem, arch, subarch), + '%s_%s_%s' % (prefix, arch, subarch), + '%s_%s_%s' % (prefix, osystem, arch), + '%s_%s' % (prefix, arch), + '%s_%s' % (prefix, osystem), + '%s' % prefix, + ], + list(get_preseed_filenames(node, prefix, osystem, release))) + class TestConfiguration(MAASServerTestCase): """Test for correct configuration of the preseed component.""" @@ -186,7 +287,7 @@ self.assertEqual( (None, None), get_preseed_template( - (factory.getRandomString(), factory.getRandomString()))) + (factory.make_string(), factory.make_string()))) def test_get_preseed_template_returns_None_when_no_filenames(self): # get_preseed_template() returns 
None when no filenames are passed in. @@ -194,7 +295,7 @@ self.assertEqual((None, None), get_preseed_template(())) def test_get_preseed_template_find_template_in_first_location(self): - template_content = factory.getRandomString() + template_content = factory.make_string() template_path = self.make_file(contents=template_content) template_filename = os.path.basename(template_path) locations = [ @@ -207,7 +308,7 @@ get_preseed_template([template_filename])) def test_get_preseed_template_find_template_in_last_location(self): - template_content = factory.getRandomString() + template_content = factory.make_string() template_path = self.make_file(contents=template_content) template_filename = os.path.basename(template_path) locations = [ @@ -236,22 +337,22 @@ path = os.path.join(self.location, name) rendered_content = None if content is None: - rendered_content = factory.getRandomString() + rendered_content = factory.make_string() content = b'{{def stuff}}%s{{enddef}}{{stuff}}' % rendered_content with open(path, "wb") as outf: outf.write(content) return rendered_content def test_load_preseed_template_returns_PreseedTemplate(self): - name = factory.getRandomString() + name = factory.make_string() self.create_template(self.location, name) - node = factory.make_node() + node = factory.make_Node() template = load_preseed_template(node, name) self.assertIsInstance(template, PreseedTemplate) def test_load_preseed_template_raises_if_no_template(self): - node = factory.make_node() - unknown_template_name = factory.getRandomString() + node = factory.make_Node() + unknown_template_name = factory.make_string() self.assertRaises( TemplateNotFoundError, load_preseed_template, node, unknown_template_name) @@ -260,66 +361,67 @@ # The template lookup method ends up picking up a template named # 'generic' if no more specific template exist. 
content = self.create_template(self.location, GENERIC_FILENAME) - node = factory.make_node(hostname=factory.getRandomString()) - template = load_preseed_template(node, factory.getRandomString()) + node = factory.make_Node(hostname=factory.make_string()) + template = load_preseed_template(node, factory.make_string()) self.assertEqual(content, template.substitute()) def test_load_preseed_template_prefix_lookup(self): # 2nd last in the hierarchy is a template named 'prefix'. - prefix = factory.getRandomString() + prefix = factory.make_string() # Create the generic template. This one will be ignored due to the # presence of a more specific template. self.create_template(self.location, GENERIC_FILENAME) # Create the 'prefix' template. This is the one which will be # picked up. content = self.create_template(self.location, prefix) - node = factory.make_node(hostname=factory.getRandomString()) + node = factory.make_Node(hostname=factory.make_string()) template = load_preseed_template(node, prefix) self.assertEqual(content, template.substitute()) def test_load_preseed_template_node_specific_lookup(self): # At the top of the lookup hierarchy is a template specific to this # node. It will be used first if it's present. - prefix = factory.getRandomString() - release = factory.getRandomString() + prefix = factory.make_string() + osystem = factory.make_string() + release = factory.make_string() # Create the generic and 'prefix' templates. They will be ignored # due to the presence of a more specific template. self.create_template(self.location, GENERIC_FILENAME) self.create_template(self.location, prefix) - node = factory.make_node(hostname=factory.getRandomString()) - node_template_name = "%s_%s_%s_%s" % ( - prefix, node.architecture.replace('/', '_'), + node = factory.make_Node(hostname=factory.make_string()) + node_template_name = "%s_%s_%s_%s_%s" % ( + prefix, osystem, node.architecture.replace('/', '_'), release, node.hostname) # Create the node-specific template. 
content = self.create_template(self.location, node_template_name) - template = load_preseed_template(node, prefix, release) + template = load_preseed_template(node, prefix, osystem, release) self.assertEqual(content, template.substitute()) def test_load_preseed_template_with_inherits(self): # A preseed file can "inherit" from another file. - prefix = factory.getRandomString() + prefix = factory.make_string() # Create preseed template. - master_template_name = factory.getRandomString() + master_template_name = factory.make_string() preseed_content = '{{inherit "%s"}}' % master_template_name self.create_template(self.location, prefix, preseed_content) master_content = self.create_template( self.location, master_template_name) - node = factory.make_node() + node = factory.make_Node() template = load_preseed_template(node, prefix) self.assertEqual(master_content, template.substitute()) def test_load_preseed_template_parent_lookup_doesnt_include_default(self): # The lookup for parent templates does not include the default # 'generic' file. - prefix = factory.getRandomString() + prefix = factory.make_string() # Create 'generic' template. It won't be used because the # lookup for parent templates does not use the 'generic' template. self.create_template(self.location, GENERIC_FILENAME) - unknown_master_template_name = factory.getRandomString() + unknown_master_template_name = factory.make_string() # Create preseed template. 
preseed_content = '{{inherit "%s"}}' % unknown_master_template_name self.create_template(self.location, prefix, preseed_content) - node = factory.make_node() + node = factory.make_Node() template = load_preseed_template(node, prefix) self.assertRaises( TemplateNotFoundError, template.substitute) @@ -330,7 +432,7 @@ def make_bare_nodegroup(self): """Create `NodeGroup` without interfaces.""" - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() nodegroup.nodegroupinterface_set.all().delete() return nodegroup @@ -342,18 +444,18 @@ Other network settings are derived from the IP address. """ network = make_network(ip, subnet_mask) - return factory.make_node_group_interface( + return factory.make_NodeGroupInterface( nodegroup=nodegroup, management=mgt, network=network, ip=ip, subnet_mask=subnet_mask) def make_lease_for_node(self, node, ip=None): """Create a `MACAddress` and corresponding `DHCPLease` for `node`.""" - mac = factory.make_mac_address(node=node).mac_address - factory.make_dhcp_lease(nodegroup=node.nodegroup, mac=mac, ip=ip) + mac = factory.make_MACAddress(node=node).mac_address + factory.make_DHCPLease(nodegroup=node.nodegroup, mac=mac, ip=ip) def test_returns_only_interface(self): - node = factory.make_node() - [interface] = list(node.nodegroup.nodegroupinterface_set.all()) + node = factory.make_Node() + interface = factory.make_NodeGroupInterface(node.nodegroup) address = pick_cluster_controller_address(node) @@ -366,7 +468,7 @@ self.make_nodegroupinterface(nodegroup, '192.168.11.1') self.make_nodegroupinterface(nodegroup, nearest_address) self.make_nodegroupinterface(nodegroup, '192.168.22.1') - node = factory.make_node(nodegroup=nodegroup) + node = factory.make_Node(nodegroup=nodegroup) self.make_lease_for_node(node, '192.168.33.101') self.make_lease_for_node(node, '10.99.1.105') self.make_lease_for_node(node, '192.168.44.101') @@ -378,79 +480,71 @@ nodegroup, '10.100.100.1', mgt=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) 
self.make_nodegroupinterface(nodegroup, '10.100.101.1') - node = factory.make_node(nodegroup=nodegroup) + node = factory.make_Node(nodegroup=nodegroup) self.make_lease_for_node(node, '10.100.101.99') self.assertEqual('10.100.101.1', pick_cluster_controller_address(node)) def test_prefers_managed_interface_over_unmanaged_interface(self): nodegroup = self.make_bare_nodegroup() - factory.make_node_group_interface( + factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) - best_interface = factory.make_node_group_interface( + best_interface = factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) - factory.make_node_group_interface( + factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) address = pick_cluster_controller_address( - factory.make_node(nodegroup=nodegroup)) + factory.make_Node(nodegroup=nodegroup)) self.assertIsNotNone(address) self.assertEqual(best_interface.ip, address) def test_prefers_dns_managed_interface_over_unmanaged_interface(self): nodegroup = self.make_bare_nodegroup() - factory.make_node_group_interface( + factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) - best_interface = factory.make_node_group_interface( + best_interface = factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) - factory.make_node_group_interface( + factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) address = pick_cluster_controller_address( - factory.make_node(nodegroup=nodegroup)) + factory.make_Node(nodegroup=nodegroup)) self.assertIsNotNone(address) self.assertEqual(best_interface.ip, address) def test_returns_None_if_no_interfaces(self): - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() nodegroup.nodegroupinterface_set.all().delete() self.assertIsNone( pick_cluster_controller_address( - 
factory.make_node(nodegroup=nodegroup))) + factory.make_Node(nodegroup=nodegroup))) def test_makes_consistent_choice(self): # Not a very thorough test, but we want at least a little bit of # predictability. - nodegroup = factory.make_node_group( + nodegroup = factory.make_NodeGroup( NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) for _ in range(5): - factory.make_node_group_interface( + factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) - node = factory.make_node(nodegroup=nodegroup) + node = factory.make_Node(nodegroup=nodegroup) self.assertEqual( pick_cluster_controller_address(node), pick_cluster_controller_address(node)) -def make_url(name): - """Create a fake archive URL.""" - return "http://%s.example.com/%s/" % ( - factory.make_name(name), - factory.make_name('path'), - ) - - class TestPreseedContext(MAASServerTestCase): """Tests for `get_preseed_context`.""" def test_get_preseed_context_contains_keys(self): - release = factory.getRandomString() - nodegroup = factory.make_node_group(maas_url=factory.getRandomString()) + release = factory.make_string() + nodegroup = factory.make_NodeGroup(maas_url=factory.make_string()) context = get_preseed_context(release, nodegroup) self.assertItemsEqual( - ['release', 'metadata_enlist_url', 'server_host', 'server_url', - 'main_archive_hostname', 'main_archive_directory', + ['osystem', 'release', 'metadata_enlist_url', 'server_host', + 'server_url', 'main_archive_hostname', 'main_archive_directory', 'ports_archive_hostname', 'ports_archive_directory', 'http_proxy'], context) @@ -458,12 +552,12 @@ def test_get_preseed_context_archive_refs(self): # urlparse lowercases the hostnames. That should not have any # impact but for testing, create lower-case hostnames. 
- main_archive = make_url('main_archive') - ports_archive = make_url('ports_archive') + main_archive = factory.make_url(netloc="main-archive.example.com") + ports_archive = factory.make_url(netloc="ports-archive.example.com") Config.objects.set_config('main_archive', main_archive) Config.objects.set_config('ports_archive', ports_archive) - nodegroup = factory.make_node_group(maas_url=factory.getRandomString()) - context = get_preseed_context(factory.make_node(), nodegroup) + nodegroup = factory.make_NodeGroup(maas_url=factory.make_string()) + context = get_preseed_context(factory.make_Node(), nodegroup) parsed_main_archive = urlparse(main_archive) parsed_ports_archive = urlparse(ports_archive) self.assertEqual( @@ -481,24 +575,27 @@ )) -class TestNodePreseedContext(MAASServerTestCase): +class TestNodePreseedContext( + PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): """Tests for `get_node_preseed_context`.""" def test_get_node_preseed_context_contains_keys(self): - node = factory.make_node() - release = factory.getRandomString() + node = factory.make_Node(nodegroup=self.rpc_nodegroup) + self.configure_get_boot_images_for_node(node, 'install') + release = factory.make_string() context = get_node_preseed_context(node, release) self.assertItemsEqual( ['driver', 'driver_package', 'node', 'node_disable_pxe_data', 'node_disable_pxe_url', - 'preseed_data', 'third_party_drivers', + 'preseed_data', 'third_party_drivers', 'license_key', ], context) def test_context_contains_third_party_drivers(self): - node = factory.make_node() - release = factory.getRandomString() - enable_third_party_drivers = factory.getRandomBoolean() + node = factory.make_Node(nodegroup=self.rpc_nodegroup) + self.configure_get_boot_images_for_node(node, 'install') + release = factory.make_string() + enable_third_party_drivers = factory.pick_bool() Config.objects.set_config( 'enable_third_party_drivers', enable_third_party_drivers) context = get_node_preseed_context(node, release) @@ -517,7 
+614,8 @@ self.assertEqual(quote(var), observed) -class TestRenderPreseed(MAASServerTestCase): +class TestRenderPreseed( + PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): """Tests for `render_preseed`. These tests check that the templates render (i.e. that no variable is @@ -533,7 +631,8 @@ ] def test_render_preseed(self): - node = factory.make_node() + node = factory.make_Node(nodegroup=self.rpc_nodegroup) + self.configure_get_boot_images_for_node(node, 'install') preseed = render_preseed(node, self.preseed, "precise") # The test really is that the preseed is rendered without an # error. @@ -541,10 +640,12 @@ def test_get_preseed_uses_nodegroup_maas_url(self): ng_url = 'http://%s' % factory.make_hostname() - ng = factory.make_node_group(maas_url=ng_url) + self.rpc_nodegroup.maas_url = ng_url + self.rpc_nodegroup.save() maas_url = 'http://%s' % factory.make_hostname() - node = factory.make_node( - nodegroup=ng, status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, status=NODE_STATUS.COMMISSIONING) + self.configure_get_boot_images_for_node(node, 'install') self.patch(settings, 'DEFAULT_MAAS_URL', maas_url) preseed = render_preseed(node, self.preseed, "precise") self.assertThat( @@ -576,35 +677,307 @@ ng_url = 'http://%s' % factory.make_hostname() maas_url = 'http://%s' % factory.make_hostname() self.patch(settings, 'DEFAULT_MAAS_URL', maas_url) - nodegroup = factory.make_node_group(maas_url=ng_url) + nodegroup = factory.make_NodeGroup(maas_url=ng_url) preseed = render_enlistment_preseed( self.preseed, "precise", nodegroup=nodegroup) self.assertThat( preseed, MatchesAll(*[Contains(ng_url), Not(Contains(maas_url))])) -class TestGetCurtinUserData(MAASServerTestCase): +class TestRenderPreseedWindows( + PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): + """Tests for `render_preseed`. + + These tests check that the templates render (i.e. that no variable is + missing). 
+ """ + + # Create a scenario for each possible windows release. + scenarios = [ + (release, {'release': release}) + for release in ['win2012', 'win2012hv', 'win2012hvr2', 'win2012r2'] + ] + + def return_windows_specific_preseed_data(self): + rpc_get_preseed_data = self.rpc_cluster.GetPreseedData + rpc_get_preseed_data.side_effect = None + rpc_get_preseed_data.return_value = defer.succeed({"data": { + 'maas_metadata_url': factory.make_name("metadata-url"), + 'maas_oauth_consumer_secret': factory.make_name("consumer-secret"), + 'maas_oauth_consumer_key': factory.make_name("consumer-key"), + 'maas_oauth_token_key': factory.make_name("token-key"), + 'maas_oauth_token_secret': factory.make_name("token-secret"), + 'hostname': factory.make_name("hostname"), + }}) + + def test_render_preseed(self): + self.return_windows_specific_preseed_data() + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, osystem='windows', + architecture='amd64/generic', distro_series=self.release, + status=NODE_STATUS.DEPLOYING) + self.configure_get_boot_images_for_node(node, 'install') + preseed = render_preseed( + node, '', osystem='windows', release=self.release) + # The test really is that the preseed is rendered without an + # error. 
+ self.assertIsInstance(preseed, bytes) + + +class TestListGatewaysAndMACs(MAASServerTestCase): + + def test__lists_known_gateways(self): + network = factory.make_ipv4_network() + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + network=network) + gateway = factory.pick_ip_in_network(network) + mac = node.get_primary_mac() + mac.cluster_interface.router_ip = gateway + mac.cluster_interface.save() + self.assertEqual( + {(gateway, mac.mac_address)}, + list_gateways_and_macs(node)) + + def test__lists_gateways_from_all_associated_cluster_interfaces(self): + # XXX jtv 2014-09-16 bug=1358130: There's a quick-and-dirty solution + # where all cluster interfaces on the same cluster network interface + # are all considered connected to the same node MACs. And so, + # list_gateways_and_macs must respect that association, rather than + # just query MACAddress.cluster_interface. + ipv4_network = factory.make_ipv4_network() + ipv6_network = factory.make_ipv6_network() + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + network=ipv4_network) + ipv4_gateway = factory.pick_ip_in_network(ipv4_network) + mac = node.get_primary_mac() + mac.cluster_interface.router_ip = ipv4_gateway + mac.cluster_interface.save() + ipv6_gateway = factory.pick_ip_in_network(ipv6_network) + factory.make_NodeGroupInterface( + node.nodegroup, interface=mac.cluster_interface.interface, + network=ipv6_network, router_ip=ipv6_gateway) + self.assertEqual( + { + (ipv4_gateway, mac.mac_address), + (ipv6_gateway, mac.mac_address), + }, + list_gateways_and_macs(node)) + + def test__skips_unknown_cluster_interfaces(self): + node = factory.make_node_with_mac_attached_to_nodegroupinterface() + mac = node.get_primary_mac() + mac.cluster_interface = None + mac.save() + self.assertEqual(set(), list_gateways_and_macs(node)) + + def test__skips_unknown_routers(self): + node = factory.make_node_with_mac_attached_to_nodegroupinterface() + mac = node.get_primary_mac() + 
mac.cluster_interface.router_ip = None + mac.cluster_interface.management = ( + NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) + mac.cluster_interface.save() + self.assertEqual(set(), list_gateways_and_macs(node)) + + +class TestComposeCurtinMAASReporter(MAASServerTestCase): + + def load_reporter(self, preseeds): + [reporter_yaml] = preseeds + return yaml.safe_load(reporter_yaml) + + def test__returns_list_of_yaml_strings(self): + preseeds = compose_curtin_maas_reporter(factory.make_Node()) + self.assertIsInstance(preseeds, list) + self.assertThat(preseeds, HasLength(1)) + reporter = self.load_reporter(preseeds) + self.assertIsInstance(reporter, dict) + self.assertEqual(['reporter'], list(reporter.keys())) + + def test__returns_reporter_url(self): + node = factory.make_Node() + preseeds = compose_curtin_maas_reporter(node) + reporter = self.load_reporter(preseeds) + self.assertEqual( + absolute_reverse( + 'curtin-metadata-version', args=['latest'], + query={'op': 'signal'}, base_url=node.nodegroup.maas_url), + reporter['reporter']['maas']['url']) + + def test__returns_reporter_oauth_creds(self): + node = factory.make_Node() + token = NodeKey.objects.get_token_for_node(node) + preseeds = compose_curtin_maas_reporter(node) + reporter = self.load_reporter(preseeds) + self.assertEqual( + token.consumer.key, + reporter['reporter']['maas']['consumer_key']) + self.assertEqual( + token.key, + reporter['reporter']['maas']['token_key']) + self.assertEqual( + token.secret, + reporter['reporter']['maas']['token_secret']) + + +class TestComposeCurtinNetworkPreseed(MAASServerTestCase): + + def test__returns_list_of_yaml_strings(self): + preseeds = compose_curtin_network_preseed( + factory.make_Node(osystem='ubuntu')) + self.assertIsInstance(preseeds, list) + self.assertThat(preseeds, HasLength(2)) + [write_files_yaml, late_commands_yaml] = preseeds + write_files = yaml.safe_load(write_files_yaml) + self.assertIsInstance(write_files, dict) + self.assertEqual(['write_files'], 
list(write_files.keys())) + late_commands = yaml.safe_load(late_commands_yaml) + self.assertIsInstance(late_commands, dict) + self.assertEqual(['late_commands'], list(late_commands.keys())) + + def test__returns_empty_if_unsupported_OS(self): + self.assertEqual( + [], + compose_curtin_network_preseed( + factory.make_Node(osystem='windows'))) + + def test__uploads_script_if_supported_OS(self): + [write_files_yaml, _] = compose_curtin_network_preseed( + factory.make_Node(osystem='ubuntu')) + write_files = yaml.safe_load(write_files_yaml) + file_spec = write_files['write_files']['maas_configure_interfaces'] + self.expectThat( + file_spec['path'], + Equals('/usr/local/bin/maas_configure_interfaces.py')) + self.expectThat(file_spec['permissions'], Equals('0755')) + script = locate_config( + 'templates', 'deployment-user-data', + 'maas_configure_interfaces.py') + self.expectThat(file_spec['content'], Equals(read_text_file(script))) + + def test__runs_script_if_supported_OS(self): + node = factory.make_Node() + # Let the node default to Ubuntu, which is supported for IPv6. 
+ node.osystem = '' + node.save() + [_, late_commands_yaml] = compose_curtin_network_preseed(node) + late_commands = yaml.safe_load(late_commands_yaml) + command = ( + late_commands['late_commands']['90_maas_configure_interfaces']) + self.assertIsInstance(command, list) + self.assertEqual( + ['curtin', 'in-target', '--'], + command[:3]) + self.assertIn('/usr/local/bin/maas_configure_interfaces.py', command) + self.assertIn('--update-interfaces', command) + self.assertIn('--name-interfaces', command) + + def test__includes_static_IPv6_addresses(self): + network = factory.make_ipv6_network() + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + network=network, osystem='ubuntu') + mac = node.get_primary_mac() + ip = factory.pick_ip_in_network(network) + factory.make_StaticIPAddress(mac=mac, ip=ip) + [_, late_commands_yaml] = compose_curtin_network_preseed(node) + late_commands = yaml.safe_load(late_commands_yaml) + [command] = list(late_commands['late_commands'].values()) + self.assertIn('--static-ip=%s=%s' % (ip, mac), command) + + def test__ignores_static_IPv4_addresses(self): + network = factory.make_ipv4_network() + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + network=network, osystem='ubuntu') + mac = node.get_primary_mac() + ip = factory.pick_ip_in_network(network) + factory.make_StaticIPAddress(mac=mac, ip=ip) + [_, late_commands_yaml] = compose_curtin_network_preseed(node) + late_commands = yaml.safe_load(late_commands_yaml) + [command] = list(late_commands['late_commands'].values()) + self.assertNotIn(ip, ' '.join(command)) + + def test__includes_IPv6_gateway_addresses(self): + network = factory.make_ipv6_network() + gateway = factory.pick_ip_in_network(network) + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + network=network, osystem='ubuntu') + mac = node.get_primary_mac() + mac.cluster_interface.router_ip = gateway + mac.cluster_interface.save() + [_, late_commands_yaml] = 
compose_curtin_network_preseed(node) + late_commands = yaml.safe_load(late_commands_yaml) + [command] = list(late_commands['late_commands'].values()) + self.assertIn('--gateway=%s=%s' % (gateway, mac), command) + + def test__ignores_IPv4_gateway_addresses(self): + network = factory.make_ipv4_network() + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + network=network, osystem='ubuntu') + mac = node.get_primary_mac() + gateway = factory.pick_ip_in_network(network) + mac.cluster_interface.router_ip = gateway + mac.cluster_interface.save() + [_, late_commands_yaml] = compose_curtin_network_preseed(node) + late_commands = yaml.safe_load(late_commands_yaml) + [command] = list(late_commands['late_commands'].values()) + self.assertNotIn('--gateway', ' '.join(command)) + self.assertNotIn(gateway, ' '.join(command)) + + +class TestGetCurtinUserData( + PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): """Tests for `get_curtin_userdata`.""" def test_get_curtin_userdata(self): - node = factory.make_node() + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, boot_type=NODE_BOOT.FASTPATH, + mac=True) + factory.make_NodeGroupInterface( + node.nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) arch, subarch = node.architecture.split('/') - factory.make_boot_image( - architecture=arch, subarchitecture=subarch, - release=node.get_distro_series(), purpose='xinstall', - nodegroup=node.nodegroup) - node.use_fastpath_installer() + self.configure_get_boot_images_for_node(node, 'xinstall') + + user_data = get_curtin_userdata(node) + + self.expectThat( + self.rpc_cluster.ComposeCurtinNetworkPreseed, + MockCalledOnceWith( + ANY, osystem=node.get_osystem(), config=ANY, + disable_ipv4=node.disable_ipv4)) + self.assertIn("PREFIX='curtin'", user_data) + + +class TestGetCurtinUserDataOS( + PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): + """Tests for `get_curtin_userdata` using os specific scenarios.""" + + # Create a scenario for each 
possible os specific preseed. + scenarios = [ + (name, {'os_name': name}) + for name in ['centos', 'suse', 'windows'] + ] + + def test_get_curtin_userdata(self): + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, osystem=self.os_name, + boot_type=NODE_BOOT.FASTPATH, mac=True) + factory.make_NodeGroupInterface( + node.nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + arch, subarch = node.architecture.split('/') + self.configure_get_boot_images_for_node(node, 'xinstall') user_data = get_curtin_userdata(node) # Just check that the user data looks good. self.assertIn("PREFIX='curtin'", user_data) -class TestCurtinUtilities(MAASServerTestCase): +class TestCurtinUtilities( + PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): """Tests for the curtin-related utilities.""" def test_get_curtin_config(self): - node = factory.make_node() - node.use_fastpath_installer() + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, boot_type=NODE_BOOT.FASTPATH) + self.configure_get_boot_images_for_node(node, 'xinstall') config = get_curtin_config(node) self.assertThat( config, @@ -624,8 +997,9 @@ if main_arch is None: main_arch = factory.make_name('arch') arch = '%s/%s' % (main_arch, factory.make_name('subarch')) - node = factory.make_node(architecture=arch) - node.use_fastpath_installer() + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, architecture=arch, + boot_type=NODE_BOOT.FASTPATH) return node def extract_archive_setting(self, userdata): @@ -650,6 +1024,7 @@ def test_get_curtin_config_uses_main_archive_for_i386(self): node = self.make_fastpath_node('i386') + self.configure_get_boot_images_for_node(node, 'xinstall') userdata = get_curtin_config(node) self.assertEqual( self.summarise_url(Config.objects.get_config('main_archive')), @@ -657,6 +1032,7 @@ def test_get_curtin_config_uses_main_archive_for_amd64(self): node = self.make_fastpath_node('amd64') + self.configure_get_boot_images_for_node(node, 'xinstall') userdata = 
get_curtin_config(node) self.assertEqual( self.summarise_url(Config.objects.get_config('main_archive')), @@ -664,85 +1040,282 @@ def test_get_curtin_config_uses_ports_archive_for_other_arch(self): node = self.make_fastpath_node() + self.configure_get_boot_images_for_node(node, 'xinstall') userdata = get_curtin_config(node) self.assertEqual( self.summarise_url(Config.objects.get_config('ports_archive')), self.summarise_url(self.extract_archive_setting(userdata))) def test_get_curtin_context(self): - node = factory.make_node() - node.use_fastpath_installer() + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, boot_type=NODE_BOOT.FASTPATH) context = get_curtin_context(node) - self.assertItemsEqual(['curtin_preseed'], context) + self.assertItemsEqual( + ['curtin_preseed'], context) self.assertIn('cloud-init', context['curtin_preseed']) + def test_get_curtin_image_calls_get_boot_images_for(self): + osystem = factory.make_name('os') + series = factory.make_name('series') + architecture = make_usable_architecture(self) + arch, subarch = architecture.split('/') + node = factory.make_Node( + osystem=osystem, distro_series=series, architecture=architecture) + mock_get_boot_images_for = self.patch( + preseed_module, 'get_boot_images_for') + mock_get_boot_images_for.return_value = [ + make_rpc_boot_image(purpose='xinstall')] + get_curtin_image(node) + self.assertThat( + mock_get_boot_images_for, + MockCalledOnceWith(node.nodegroup, osystem, arch, subarch, series)) + + def test_get_curtin_image_raises_ClusterUnavailable(self): + node = factory.make_Node() + self.patch( + preseed_module, + 'get_boot_images_for').side_effect = NoConnectionsAvailable + self.assertRaises(ClusterUnavailable, get_curtin_image, node) + + def test_get_curtin_image_raises_MissingBootImage(self): + node = factory.make_Node() + self.patch( + preseed_module, + 'get_boot_images_for').return_value = [] + self.assertRaises(MissingBootImage, get_curtin_image, node) + + def 
test_get_curtin_image_returns_xinstall_image(self): + node = factory.make_Node() + other_images = [make_rpc_boot_image() for _ in range(3)] + xinstall_image = make_rpc_boot_image(purpose='xinstall') + images = other_images + [xinstall_image] + self.patch( + preseed_module, + 'get_boot_images_for').return_value = images + self.assertEqual(xinstall_image, get_curtin_image(node)) + def test_get_curtin_installer_url_returns_url(self): - # Exclude DISTRO_SERIES.default. It's a special value that defers - # to a run-time setting which we don't provide in this test. - series = factory.getRandomEnum( - DISTRO_SERIES, but_not=DISTRO_SERIES.default) + osystem = make_usable_osystem(self) + series = osystem['default_release'] architecture = make_usable_architecture(self) - node = factory.make_node( + xinstall_path = factory.make_name('xi_path') + xinstall_type = factory.make_name('xi_type') + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, osystem=osystem['name'], architecture=architecture, distro_series=series) + factory.make_NodeGroupInterface( + node.nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) arch, subarch = architecture.split('/') - boot_image = factory.make_boot_image( - architecture=arch, subarchitecture=subarch, release=series, - purpose='xinstall', nodegroup=node.nodegroup) + boot_image = make_rpc_boot_image( + osystem=osystem['name'], release=series, + architecture=arch, subarchitecture=subarch, + purpose='xinstall', xinstall_path=xinstall_path, + xinstall_type=xinstall_type) + self.patch( + preseed_module, + 'get_boot_images_for').return_value = [boot_image] installer_url = get_curtin_installer_url(node) [interface] = node.nodegroup.get_managed_interfaces() self.assertEqual( - 'http://%s/MAAS/static/images/%s/%s/%s/%s/root-tgz' % ( - interface.ip, arch, subarch, series, boot_image.label), + '%s:http://%s/MAAS/static/images/%s/%s/%s/%s/%s/%s' % ( + xinstall_type, interface.ip, osystem['name'], arch, subarch, + series, boot_image['label'], 
xinstall_path), installer_url) def test_get_curtin_installer_url_fails_if_no_boot_image(self): - series = factory.getRandomEnum( - DISTRO_SERIES, but_not=DISTRO_SERIES.default) + osystem = make_usable_osystem(self) + series = osystem['default_release'] architecture = make_usable_architecture(self) - node = factory.make_node( + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, osystem=osystem['name'], architecture=architecture, distro_series=series) - # Generate a boot image with a different arch/subarch. - factory.make_boot_image( - architecture=factory.make_name('arch'), - subarchitecture=factory.make_name('subarch'), release=series, - purpose='xinstall', nodegroup=node.nodegroup) + # Make boot image that is not xinstall + arch, subarch = architecture.split('/') + boot_image = make_rpc_boot_image( + osystem=osystem['name'], release=series, + architecture=arch, subarchitecture=subarch) + self.patch( + preseed_module, + 'get_boot_images_for').return_value = [boot_image] error = self.assertRaises( - MAASAPIException, get_curtin_installer_url, node) + MissingBootImage, get_curtin_installer_url, node) arch, subarch = architecture.split('/') msg = ( "No image could be found for the given selection: " - "arch=%s, subarch=%s, series=%s, purpose=xinstall." % ( + "os=%s, arch=%s, subarch=%s, series=%s, purpose=xinstall." 
% ( + osystem['name'], arch, subarch, node.get_distro_series(), )) self.assertIn(msg, "%s" % error) - def test_get_preseed_type_for(self): - normal = factory.make_node() - normal.use_traditional_installer() - fpi = factory.make_node() - fpi.use_fastpath_installer() + def test_get_curtin_installer_url_doesnt_append_on_tgz(self): + osystem = make_usable_osystem(self) + series = osystem['default_release'] + architecture = make_usable_architecture(self) + xinstall_path = factory.make_name('xi_path') + xinstall_type = 'tgz' + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, osystem=osystem['name'], + architecture=architecture, distro_series=series) + factory.make_NodeGroupInterface( + node.nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) + arch, subarch = architecture.split('/') + boot_image = make_rpc_boot_image( + osystem=osystem['name'], release=series, + architecture=arch, subarchitecture=subarch, + purpose='xinstall', xinstall_path=xinstall_path, + xinstall_type=xinstall_type) + self.patch( + preseed_module, + 'get_boot_images_for').return_value = [boot_image] + + installer_url = get_curtin_installer_url(node) + + [interface] = node.nodegroup.get_managed_interfaces() + self.assertEqual( + 'http://%s/MAAS/static/images/%s/%s/%s/%s/%s/%s' % ( + interface.ip, osystem['name'], arch, subarch, + series, boot_image['label'], xinstall_path), + installer_url) + + def test_get_supported_purposes_for_node_calls_get_boot_images_for(self): + osystem = factory.make_name('os') + series = factory.make_name('series') + architecture = make_usable_architecture(self) + arch, subarch = architecture.split('/') + node = factory.make_Node( + osystem=osystem, distro_series=series, architecture=architecture) + mock_get_boot_images_for = self.patch( + preseed_module, 'get_boot_images_for') + mock_get_boot_images_for.return_value = [ + make_rpc_boot_image(purpose='xinstall')] + get_supported_purposes_for_node(node) + self.assertThat( + mock_get_boot_images_for, + 
MockCalledOnceWith(node.nodegroup, osystem, arch, subarch, series)) + + def test_get_supported_purposes_for_node_raises_ClusterUnavailable(self): + node = factory.make_Node() + self.patch( + preseed_module, + 'get_boot_images_for').side_effect = NoConnectionsAvailable + self.assertRaises( + ClusterUnavailable, + get_supported_purposes_for_node, node) + + def test_get_supported_purposes_for_node_returns_set_of_purposes(self): + osystem = factory.make_name('os') + series = factory.make_name('series') + architecture = make_usable_architecture(self) + arch, subarch = architecture.split('/') + node = factory.make_Node( + osystem=osystem, distro_series=series, architecture=architecture) + mock_get_boot_images_for = self.patch( + preseed_module, 'get_boot_images_for') + mock_get_boot_images_for.return_value = [ + make_rpc_boot_image(purpose='xinstall'), + make_rpc_boot_image(purpose='xinstall'), + make_rpc_boot_image(purpose='install')] + self.assertItemsEqual( + {'xinstall', 'install'}, + get_supported_purposes_for_node(node)) + + def test_get_available_purpose_for_node_raises_PreseedError(self): + node = factory.make_Node() + self.patch( + preseed_module, + 'get_supported_purposes_for_node').return_value = set() + self.assertRaises( + PreseedError, + get_available_purpose_for_node, [], node) + + def test_get_available_purpose_for_node_returns_best_purpose_match(self): + node = factory.make_Node() + purposes = [factory.make_name('purpose') for _ in range(3)] + purpose = random.choice(purposes) + self.patch( + preseed_module, + 'get_supported_purposes_for_node').return_value = [purpose] + self.assertEqual( + purpose, + get_available_purpose_for_node(purposes, node)) + + def test_get_preseed_type_for_commissioning(self): + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) + self.assertEqual( + PRESEED_TYPE.COMMISSIONING, get_preseed_type_for(node)) + + def test_get_preseed_type_for_disk_erasing(self): + node = factory.make_Node(status=NODE_STATUS.DISK_ERASING) + 
self.assertEqual( + PRESEED_TYPE.COMMISSIONING, get_preseed_type_for(node)) + + def test_get_preseed_type_for_default(self): + node = factory.make_Node( + boot_type=NODE_BOOT.DEBIAN, status=NODE_STATUS.DEPLOYING) + self.configure_get_boot_images_for_node(node, 'install') + self.assertEqual( + PRESEED_TYPE.DEFAULT, get_preseed_type_for(node)) + + def test_get_preseed_type_for_curtin(self): + node = factory.make_Node( + boot_type=NODE_BOOT.FASTPATH, status=NODE_STATUS.DEPLOYING) + self.configure_get_boot_images_for_node(node, 'xinstall') + self.assertEqual( + PRESEED_TYPE.CURTIN, get_preseed_type_for(node)) + + def test_get_preseed_type_for_default_when_curtin_not_supported(self): + node = factory.make_Node( + boot_type=NODE_BOOT.FASTPATH, status=NODE_STATUS.DEPLOYING) + self.configure_get_boot_images_for_node(node, 'install') + self.assertEqual( + PRESEED_TYPE.DEFAULT, get_preseed_type_for(node)) - self.assertEqual(PRESEED_TYPE.DEFAULT, get_preseed_type_for(normal)) - self.assertEqual(PRESEED_TYPE.CURTIN, get_preseed_type_for(fpi)) + def test_get_preseed_type_for_curtin_when_default_not_supported(self): + node = factory.make_Node( + boot_type=NODE_BOOT.DEBIAN, status=NODE_STATUS.DEPLOYING) + self.configure_get_boot_images_for_node(node, 'xinstall') + self.assertEqual( + PRESEED_TYPE.CURTIN, get_preseed_type_for(node)) + def test_get_preseed_type_for_poweroff(self): + # A 'ready' node isn't supposed to be powered on and thus + # will get a 'commissioning' preseed in order to be powered + # down. 
+ node = factory.make_Node( + boot_type=NODE_BOOT.DEBIAN, status=NODE_STATUS.READY) + self.assertEqual( + PRESEED_TYPE.COMMISSIONING, get_preseed_type_for(node)) -class TestRenderPreseedArchives(MAASServerTestCase): + +class TestRenderPreseedArchives( + PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): """Test that the default preseed contains the default mirrors.""" def test_render_preseed_uses_default_archives_intel(self): nodes = [ - factory.make_node( + factory.make_Node( + nodegroup=self.rpc_nodegroup, + status=NODE_STATUS.DEPLOYING, architecture=make_usable_architecture( self, arch_name="i386", subarch_name="generic")), - factory.make_node( + factory.make_Node( + nodegroup=self.rpc_nodegroup, + status=NODE_STATUS.DEPLOYING, architecture=make_usable_architecture( self, arch_name="amd64", subarch_name="generic")), ] + boot_images = [ + self.make_rpc_boot_image_for(node, 'install') + for node in nodes + ] + self.patch( + preseed_module, 'get_boot_images_for').return_value = boot_images default_snippets = [ "d-i mirror/http/hostname string archive.ubuntu.com", "d-i mirror/http/directory string /ubuntu", @@ -752,8 +1325,11 @@ self.assertThat(preseed, ContainsAll(default_snippets)) def test_render_preseed_uses_default_archives_arm(self): - node = factory.make_node(architecture=make_usable_architecture( - self, with_subarch=False, arch_name="armhf")) + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, + architecture=make_usable_architecture( + self, arch_name="armhf", subarch_name="generic")) + self.configure_get_boot_images_for_node(node, 'install') default_snippets = [ "d-i mirror/http/hostname string ports.ubuntu.com", "d-i mirror/http/directory string /ubuntu-ports", @@ -762,45 +1338,57 @@ self.assertThat(preseed, ContainsAll(default_snippets)) -class TestPreseedProxy(MAASServerTestCase): +class TestPreseedProxy( + PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): def test_preseed_uses_default_proxy(self): server_host = 
factory.make_hostname() url = 'http://%s:%d/%s' % ( - server_host, factory.getRandomPort(), factory.getRandomString()) + server_host, factory.pick_port(), factory.make_string()) self.patch(settings, 'DEFAULT_MAAS_URL', url) expected_proxy_statement = ( "mirror/http/proxy string http://%s:8000" % server_host) + node = factory.make_Node(nodegroup=self.rpc_nodegroup) + self.configure_get_boot_images_for_node(node, 'install') preseed = render_preseed( - factory.make_node(), PRESEED_TYPE.DEFAULT, "precise") + node, + PRESEED_TYPE.DEFAULT, "precise") self.assertIn(expected_proxy_statement, preseed) def test_preseed_uses_configured_proxy(self): http_proxy = 'http://%s:%d/%s' % ( - factory.getRandomString(), factory.getRandomPort(), - factory.getRandomString()) + factory.make_string(), factory.pick_port(), factory.make_string()) Config.objects.set_config('http_proxy', http_proxy) expected_proxy_statement = ( "mirror/http/proxy string %s" % http_proxy) + node = factory.make_Node(nodegroup=self.rpc_nodegroup) + self.configure_get_boot_images_for_node(node, 'install') preseed = render_preseed( - factory.make_node(), PRESEED_TYPE.DEFAULT, "precise") + node, + PRESEED_TYPE.DEFAULT, "precise") self.assertIn(expected_proxy_statement, preseed) -class TestPreseedMethods(MAASServerTestCase): +class TestPreseedMethods( + PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): """Tests for `get_enlist_preseed` and `get_preseed`. These tests check that the preseed templates render and 'look right'. 
""" def test_get_preseed_returns_default_preseed(self): - node = factory.make_node() + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, boot_type=NODE_BOOT.DEBIAN, + status=NODE_STATUS.DEPLOYING) + self.configure_get_boot_images_for_node(node, 'install') preseed = get_preseed(node) self.assertIn('preseed/late_command', preseed) def test_get_preseed_returns_curtin_preseed(self): - node = factory.make_node() - node.use_fastpath_installer() + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, boot_type=NODE_BOOT.FASTPATH, + status=NODE_STATUS.DEPLOYING) + self.configure_get_boot_images_for_node(node, 'xinstall') preseed = get_preseed(node) curtin_url = reverse('curtin-metadata') self.assertIn(curtin_url, preseed) @@ -810,12 +1398,20 @@ self.assertTrue(preseed.startswith('#cloud-config')) def test_get_preseed_returns_commissioning_preseed(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, status=NODE_STATUS.COMMISSIONING) + preseed = get_preseed(node) + self.assertIn('#cloud-config', preseed) + + def test_get_preseed_returns_commissioning_preseed_for_disk_erasing(self): + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, status=NODE_STATUS.DISK_ERASING) preseed = get_preseed(node) self.assertIn('#cloud-config', preseed) -class TestPreseedURLs(MAASServerTestCase): +class TestPreseedURLs( + PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): """Tests for functions that return preseed URLs.""" def test_compose_enlistment_preseed_url_links_to_enlistment_preseed(self): @@ -831,7 +1427,8 @@ compose_enlistment_preseed_url(), StartsWith(url)) def test_compose_preseed_url_links_to_preseed_for_node(self): - node = factory.make_node() + node = factory.make_Node(nodegroup=self.rpc_nodegroup) + self.configure_get_boot_images_for_node(node, 'install') response = self.client.get(compose_preseed_url(node)) self.assertEqual( (httplib.OK, get_preseed(node)), @@ -839,5 +1436,5 @@ 
def test_compose_preseed_url_returns_absolute_link(self): self.assertThat( - compose_preseed_url(factory.make_node()), + compose_preseed_url(factory.make_Node()), StartsWith('http://')) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_rabbit.py maas-1.7.6+bzr3376/src/maasserver/tests/test_rabbit.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_rabbit.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_rabbit.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,187 +0,0 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Rabbit messaging tests.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - - -import socket -import time - -from amqplib import client_0_8 as amqp -from django.conf import settings -from maasserver.exceptions import NoRabbit -from maasserver.rabbit import ( - RabbitBase, - RabbitExchange, - RabbitMessaging, - RabbitQueue, - RabbitSession, - ) -from maasserver.testing.factory import factory -from maasserver.testing.rabbit import uses_rabbit_fixture -from maastesting.testcase import MAASTestCase -from testtools.testcase import ExpectedException - - -def run_rabbit_command(rabbit, command): - """Run a Rabbit command through rabbitctl, and return its output.""" - if isinstance(command, unicode): - command = command.encode('ascii') - rabbit_env = rabbit.runner.environment - return rabbit_env.rabbitctl(command)[0] - - -class TestRabbitSession(MAASTestCase): - - @uses_rabbit_fixture - def test_connection_gets_connection(self): - session = RabbitSession() - self.addCleanup(session.disconnect) - # Referencing the connection property causes a connection to be - # created. - connection = session.connection - self.assertIsNotNone(session._connection) - # The same connection is returned every time. 
- self.assertIs(connection, session.connection) - - def test_connection_raises_NoRabbit_if_cannot_connect(self): - # Attempt to connect to a RabbitMQ on the local "discard" - # service. The connection will be refused. - self.patch(settings, 'RABBITMQ_HOST', 'localhost:9') - session = RabbitSession() - with ExpectedException(NoRabbit): - session.connection - - def test_connection_propagates_exceptions(self): - - def fail(*args, **kwargs): - raise socket.error("Connection not refused, but failed anyway.") - - self.patch(amqp, 'Connection', fail) - session = RabbitSession() - with ExpectedException(socket.error): - session.connection - - def test_disconnect(self): - session = RabbitSession() - session.disconnect() - self.assertIsNone(session._connection) - - -class TestRabbitMessaging(MAASTestCase): - - @uses_rabbit_fixture - def test_messaging_getExchange(self): - exchange_name = factory.getRandomString() - messaging = RabbitMessaging(exchange_name) - self.addCleanup(messaging._session.disconnect) - exchange = messaging.getExchange() - self.assertIsInstance(exchange, RabbitExchange) - self.assertEqual(messaging._session, exchange._session) - self.assertEqual(exchange_name, exchange.exchange_name) - - @uses_rabbit_fixture - def test_messaging_getQueue(self): - exchange_name = factory.getRandomString() - messaging = RabbitMessaging(exchange_name) - self.addCleanup(messaging._session.disconnect) - queue = messaging.getQueue() - self.assertIsInstance(queue, RabbitQueue) - self.assertEqual(messaging._session, queue._session) - self.assertEqual(exchange_name, queue.exchange_name) - - -class TestRabbitBase(MAASTestCase): - - def test_rabbitbase_contains_session(self): - exchange_name = factory.getRandomString() - rabbitbase = RabbitBase(RabbitSession(), exchange_name) - self.assertIsInstance(rabbitbase._session, RabbitSession) - - def test_base_has_exchange_name(self): - exchange_name = factory.getRandomString() - rabbitbase = RabbitBase(RabbitSession(), exchange_name) - 
self.assertEqual(exchange_name, rabbitbase.exchange_name) - - @uses_rabbit_fixture - def test_base_channel(self): - rabbitbase = RabbitBase(RabbitSession(), factory.getRandomString()) - self.addCleanup(rabbitbase._session.disconnect) - # Referencing the channel property causes an open channel to be - # created. - channel = rabbitbase.channel - self.assertTrue(channel.is_open) - self.assertIsNotNone(rabbitbase._session._connection) - # The same channel is returned every time. - self.assertIs(channel, rabbitbase.channel) - - @uses_rabbit_fixture - def test_base_channel_creates_exchange(self): - exchange_name = factory.getRandomString() - rabbitbase = RabbitBase(RabbitSession(), exchange_name) - self.addCleanup(rabbitbase._session.disconnect) - rabbitbase.channel - self.assertIn( - exchange_name, - run_rabbit_command(self.rabbit, 'list_exchanges')) - - -class TestRabbitExchange(MAASTestCase): - - def basic_get(self, channel, queue_name, timeout): - endtime = time.time() + timeout - while True: - message = channel.basic_get(queue_name) - if message is None: - if time.time() > endtime: - self.fail('Cannot get message.') - time.sleep(0.1) - else: - return message - - @uses_rabbit_fixture - def test_exchange_publish(self): - exchange_name = factory.getRandomString() - message_content = factory.getRandomString() - exchange = RabbitExchange(RabbitSession(), exchange_name) - self.addCleanup(exchange._session.disconnect) - - rabbitbase = RabbitBase(RabbitSession(), exchange_name) - self.addCleanup(rabbitbase._session.disconnect) - channel = rabbitbase.channel - queue_name = channel.queue_declare(auto_delete=True)[0] - channel.queue_bind(exchange=exchange_name, queue=queue_name) - exchange.publish(message_content) - message = self.basic_get(channel, queue_name, timeout=2) - self.assertEqual(message_content, message.body) - - -class TestRabbitQueue(MAASTestCase): - - @uses_rabbit_fixture - def test_rabbit_queue_binds_queue(self): - exchange_name = factory.getRandomString() - 
message_content = factory.getRandomString() - queue = RabbitQueue(RabbitSession(), exchange_name) - self.addCleanup(queue._session.disconnect) - - # Publish to queue.name. - base = RabbitBase(RabbitSession(), exchange_name) - self.addCleanup(base._session.disconnect) - channel = base.channel - msg = amqp.Message(message_content) - channel.basic_publish( - exchange=exchange_name, routing_key='', msg=msg) - message = channel.basic_get(queue.name) - self.assertEqual(message_content, message.body) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_refresh_worker.py maas-1.7.6+bzr3376/src/maasserver/tests/test_refresh_worker.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_refresh_worker.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_refresh_worker.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,69 +0,0 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for the code that refreshes a node-group worker's information.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from apiclient.creds import convert_tuple_to_string -from maasserver import refresh_worker as refresh_worker_module -from maasserver.models.user import get_creds_tuple -from maasserver.refresh_worker import refresh_worker -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase -from maastesting.fakemethod import FakeMethod -from provisioningserver import tasks - - -class TestRefreshWorker(MAASServerTestCase): - - def patch_refresh_functions(self): - """Replace the worker refresh functions with a test double. - - The test double, which is returned for convenience, contains the - same keys as the original, but each maps to a `FakeMethod`. 
- - To verify a refresh task's effect, all that a test needs to do - is inspect the calls on these fakes. If the test mis-spells an - item's name, or tries to inspect a nonexistent item, it will - fail to find a test double for that item. - """ - playground_refresh_functions = { - item: FakeMethod() - for item in tasks.refresh_functions} - self.patch(tasks, 'refresh_functions', playground_refresh_functions) - return playground_refresh_functions - - def test_refreshes_api_credentials(self): - refresh_functions = self.patch_refresh_functions() - nodegroup = factory.make_node_group() - refresh_worker(nodegroup) - creds_string = convert_tuple_to_string( - get_creds_tuple(nodegroup.api_token)) - self.assertEqual( - [(creds_string, )], - refresh_functions['api_credentials'].extract_args()) - - def test_refreshes_nodegroup_uuid(self): - refresh_functions = self.patch_refresh_functions() - nodegroup = factory.make_node_group() - refresh_worker(nodegroup) - self.assertEqual( - [(nodegroup.uuid, )], - refresh_functions['nodegroup_uuid'].extract_args()) - - def test_refresh_worker_task_routed_to_nodegroup_worker(self): - nodegroup = factory.make_node_group() - task = self.patch(refresh_worker_module, 'refresh_secrets') - refresh_worker(nodegroup) - args, kwargs = task.apply_async.call_args - self.assertEqual(nodegroup.work_queue, kwargs['queue']) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_runserver.py maas-1.7.6+bzr3376/src/maasserver/tests/test_runserver.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_runserver.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_runserver.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for the "runserver" command module.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from maasserver.management.commands.runserver import render_error -from maasserver.testing.testcase import MAASServerTestCase - - -class TestRunServer(MAASServerTestCase): - - def test_render_error_mentions_oops_id(self): - fake_oops = {'id': 'EHOVERCRAFTFULL999'} - self.assertIn(fake_oops['id'], render_error(fake_oops)) - - def test_render_error_returns_bytes(self): - # wsgi_oops produces oops pages as unicode strings, but django - # expects raw bytes. Our own error renderer returns bytes. - fake_oops = {'id': 'abc123'} - self.assertIsInstance(render_error(fake_oops), bytes) - - def test_render_error_blows_up_if_oops_id_is_not_ascii(self): - # Oopses mean that things aren't working as they should. We - # won't make things worse by including non-ASCII characters in - # the oops page. - fake_oops = {'id': '\u2322'} - self.assertRaises(Exception, render_error, fake_oops) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_security.py maas-1.7.6+bzr3376/src/maasserver/tests/test_security.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_security.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_security.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,18 +14,26 @@ __metaclass__ = type __all__ = [] +from binascii import b2a_hex from datetime import datetime +from os import unlink +from fixtures import EnvironmentVariableFixture from maasserver import security from maasserver.models.config import Config from maasserver.testing.testcase import MAASServerTestCase +from maastesting.djangotestcase import TransactionTestCase from maastesting.testcase import MAASTestCase +from provisioningserver.utils.fs import write_text_file from pytz import UTC from testtools.matchers import ( AfterPreprocessing, Equals, + FileContains, + GreaterThan, 
IsInstance, MatchesAll, + MatchesAny, ) from twisted.internet import ssl @@ -94,3 +102,66 @@ cert = security.get_region_certificate() self.assertThat(cert, is_valid_region_certificate) self.assertEqual(cert, security.load_region_certificate()) + + +is_valid_secret = MatchesAll( + IsInstance(bytes), AfterPreprocessing( + len, MatchesAny(Equals(16), GreaterThan(16)))) + + +class TestGetSharedSecret(TransactionTestCase): + + def setUp(self): + super(TestGetSharedSecret, self).setUp() + self.useFixture(EnvironmentVariableFixture( + "MAAS_ROOT", self.make_dir())) + + def test__generates_new_secret_when_none_exists(self): + secret = security.get_shared_secret() + self.assertThat(secret, is_valid_secret) + + def test__same_secret_is_returned_on_subsequent_calls(self): + self.assertEqual( + security.get_shared_secret(), + security.get_shared_secret()) + + def test__uses_database_secret_when_none_on_fs(self): + secret_before = security.get_shared_secret() + unlink(security.get_shared_secret_filesystem_path()) + secret_after = security.get_shared_secret() + self.assertEqual(secret_before, secret_after) + # The secret found in the database is written to the filesystem. + self.assertThat( + security.get_shared_secret_filesystem_path(), + FileContains(b2a_hex(secret_after))) + + def test__uses_filesystem_secret_when_none_in_database(self): + secret_before = security.get_shared_secret() + Config.objects.set_config("rpc_shared_secret", None) + secret_after = security.get_shared_secret() + self.assertEqual(secret_before, secret_after) + # The secret found on the filesystem is saved in the database. + self.assertEqual( + b2a_hex(secret_after), + Config.objects.get_config("rpc_shared_secret")) + + def test__errors_when_database_value_cannot_be_decoded(self): + security.get_shared_secret() # Ensure that the directory exists. 
+ Config.objects.set_config("rpc_shared_secret", "_") + self.assertRaises(TypeError, security.get_shared_secret) + + def test__errors_when_database_and_filesystem_values_differ(self): + security.get_shared_secret() # Ensure that the directory exists. + Config.objects.set_config("rpc_shared_secret", "666f6f") + write_text_file( + security.get_shared_secret_filesystem_path(), "626172") + self.assertRaises(AssertionError, security.get_shared_secret) + + def test__deals_fine_with_whitespace_in_database_value(self): + Config.objects.set_config("rpc_shared_secret", " 666f6f\n") + # Ordinarily we would need to commit now, because get_shared_secret() + # runs in a separate thread. However, Django thinks that transaction + # management means AUTOCOMMIT, which spares us this diabolical chore. + # This is not unique to this test method; it comes from using Django's + # TransactionTestCase, which also has a misleading name. + self.assertEqual(b"foo", security.get_shared_secret()) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_sequence.py maas-1.7.6+bzr3376/src/maasserver/tests/test_sequence.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_sequence.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_sequence.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test :class:`Sequence`.""" @@ -81,4 +81,4 @@ seq = Sequence(name) seq.create() self.assertSequenceEqual( - range(1, 11), [seq.nextval() for i in range(10)]) + range(1, 11), [seq.nextval() for _ in range(10)]) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_server_address.py maas-1.7.6+bzr3376/src/maasserver/tests/test_server_address.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_server_address.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_server_address.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for the server_address module.""" @@ -14,78 +14,214 @@ __metaclass__ = type __all__ = [] +from collections import defaultdict +from random import randint + from django.conf import settings from maasserver import server_address +from maasserver.exceptions import UnresolvableHost from maasserver.server_address import get_maas_facing_server_address from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase -from maastesting.fakemethod import FakeMethod -from netaddr import IPNetwork +from netaddr import IPAddress + +def make_hostname(): + return '%s.example.com' % factory.make_hostname() -class TestServerAddress(MAASServerTestCase): - def make_hostname(self): - return '%s.example.com' % factory.make_hostname() +class TestGetMAASFacingServerHost(MAASServerTestCase): def set_DEFAULT_MAAS_URL(self, hostname=None, with_port=False): """Patch DEFAULT_MAAS_URL to be a (partly) random URL.""" if hostname is None: - hostname = self.make_hostname() + hostname = make_hostname() if with_port: - location = "%s:%d" % (hostname, factory.getRandomPort()) + location = "%s:%d" % (hostname, factory.pick_port()) else: location = hostname url = 'http://%s/%s' 
% (location, factory.make_name("path")) self.patch(settings, 'DEFAULT_MAAS_URL', url) def test_get_maas_facing_server_host_returns_host_name(self): - hostname = self.make_hostname() + hostname = make_hostname() self.set_DEFAULT_MAAS_URL(hostname) self.assertEqual( hostname, server_address.get_maas_facing_server_host()) def test_get_maas_facing_server_host_returns_ip_if_ip_configured(self): - ip = factory.getRandomIPAddress() + ip = factory.make_ipv4_address() self.set_DEFAULT_MAAS_URL(ip) self.assertEqual(ip, server_address.get_maas_facing_server_host()) def test_get_maas_facing_server_host_returns_nodegroup_maas_url(self): hostname = factory.make_hostname() maas_url = 'http://%s' % hostname - nodegroup = factory.make_node_group(maas_url=maas_url) + nodegroup = factory.make_NodeGroup(maas_url=maas_url) self.assertEqual( hostname, server_address.get_maas_facing_server_host(nodegroup)) def test_get_maas_facing_server_host_strips_out_port(self): - hostname = self.make_hostname() + hostname = make_hostname() self.set_DEFAULT_MAAS_URL(hostname, with_port=True) self.assertEqual( hostname, server_address.get_maas_facing_server_host()) - def test_get_maas_facing_server_address_returns_IP(self): - ip = factory.getRandomIPAddress() - self.set_DEFAULT_MAAS_URL(hostname=ip) - self.assertEqual(ip, get_maas_facing_server_address()) - - def test_get_maas_facing_server_address_returns_local_IP(self): - ip = factory.getRandomIPInNetwork(IPNetwork('127.0.0.0/8')) - self.set_DEFAULT_MAAS_URL(hostname=ip) - self.assertEqual(ip, get_maas_facing_server_address()) + def test_get_maas_facing_server_host_parses_IPv6_address_in_URL(self): + ip = factory.make_ipv6_address() + self.set_DEFAULT_MAAS_URL('[%s]' % ip) + self.assertEqual( + unicode(ip), server_address.get_maas_facing_server_host()) + - def test_get_maas_facing_server_address_returns_nodegroup_maas_url(self): - ip = factory.getRandomIPInNetwork(IPNetwork('127.0.0.0/8')) +class FakeResolveHostname: + """Fake implementation for 
`resolve_hostname`. + + Makes `resolve_hostname` return the given IP addresses (always as + `IPAddress`, even though you may pass them as text). It will return just + the IPv4 ones, or just the IPv6 ones, depending on which kind of address + the caller requests. + + :ivar results_by_ip_version: Return values, as a dict mapping IP version + to the set of results for that IP version. + :ivar hostname: Host name that was passed by the last invocation. + """ + + def __init__(self, *addresses): + self.hostname = None + self.results_by_ip_version = defaultdict(set) + for addr in addresses: + addr = IPAddress(addr) + self.results_by_ip_version[addr.version].add(addr) + + def __call__(self, hostname, ip_version): + assert ip_version in (4, 6) + self.hostname = hostname + return self.results_by_ip_version[ip_version] + + +class TestGetMAASFacingServerAddress(MAASServerTestCase): + + def make_addresses(self): + """Return a set of IP addresses, mixing IPv4 and IPv6.""" + return { + factory.make_ipv4_address(), + factory.make_ipv6_address(), + } + + def patch_get_maas_facing_server_host(self, host=None): + if host is None: + host = make_hostname() + patch = self.patch(server_address, 'get_maas_facing_server_host') + patch.return_value = unicode(host) + return patch + + def patch_resolve_hostname(self, addresses=None): + if addresses is None: + addresses = self.make_addresses() + fake = FakeResolveHostname(*addresses) + return self.patch(server_address, 'resolve_hostname', fake) + + def test__integrates_with_get_maas_facing_server_host(self): + ip = factory.make_ipv4_address() maas_url = 'http://%s' % ip - nodegroup = factory.make_node_group(maas_url=maas_url) + nodegroup = factory.make_NodeGroup(maas_url=maas_url) self.assertEqual( - ip, server_address.get_maas_facing_server_host(nodegroup)) + unicode(ip), + server_address.get_maas_facing_server_host(nodegroup)) - def test_get_maas_facing_server_address_resolves_hostname(self): - ip = factory.getRandomIPAddress() - resolver = 
FakeMethod(result=ip) - self.patch(server_address, 'gethostbyname', resolver) - hostname = self.make_hostname() - self.set_DEFAULT_MAAS_URL(hostname=hostname) + def test__uses_IPv4_hostname_directly_if_ipv4_set(self): + ip = factory.make_ipv4_address() + self.patch_get_maas_facing_server_host(ip) + fake_resolve = self.patch_resolve_hostname() + result = get_maas_facing_server_address(ipv4=True) + self.assertEqual(ip, result) + self.assertIsNone(fake_resolve.hostname) + + def test__rejects_IPv4_hostname_if_ipv4_not_set(self): + self.patch_get_maas_facing_server_host(factory.make_ipv4_address()) + fake_resolve = self.patch_resolve_hostname() + self.assertRaises( + UnresolvableHost, + get_maas_facing_server_address, ipv4=False) + self.assertIsNone(fake_resolve.hostname) + + def test__uses_IPv6_hostname_directly_if_ipv6_set(self): + ip = factory.make_ipv6_address() + self.patch_get_maas_facing_server_host(ip) + fake_resolve = self.patch_resolve_hostname() + result = get_maas_facing_server_address(ipv6=True) + self.assertEqual(ip, result) + self.assertIsNone(fake_resolve.hostname) + + def test__rejects_IPv6_hostname_if_ipv6_not_set(self): + self.patch_get_maas_facing_server_host(factory.make_ipv6_address()) + fake_resolve = self.patch_resolve_hostname() + self.assertRaises( + UnresolvableHost, + get_maas_facing_server_address, ipv6=False) + self.assertIsNone(fake_resolve.hostname) + + def test__resolves_hostname(self): + hostname = make_hostname() + self.patch_get_maas_facing_server_host(hostname) + ip = factory.make_ipv4_address() + fake_resolve = self.patch_resolve_hostname([ip]) + result = get_maas_facing_server_address() + self.assertEqual(unicode(ip), result) + self.assertEqual(hostname, fake_resolve.hostname) + + def test__prefers_IPv4_if_ipv4_set(self): + # If a server has mixed v4 and v6 addresses, + # get_maas_facing_server_address() will return a v4 address + # rather than a v6 one. 
+ v4_ip = factory.make_ipv4_address() + v6_ip = factory.make_ipv6_address() + self.patch_resolve_hostname([v4_ip, v6_ip]) + self.patch_get_maas_facing_server_host() self.assertEqual( - (ip, [(hostname, )]), - (get_maas_facing_server_address(), resolver.extract_args())) + unicode(v4_ip), + get_maas_facing_server_address(ipv4=True, ipv6=True)) + + def test__ignores_IPv4_if_ipv4_not_set(self): + v4_ip = factory.make_ipv4_address() + v6_ip = factory.make_ipv6_address() + self.patch_resolve_hostname([v4_ip, v6_ip]) + self.patch_get_maas_facing_server_host() + self.assertEqual( + unicode(v6_ip), + get_maas_facing_server_address(ipv4=False, ipv6=True)) + + def test__falls_back_on_IPv6_if_ipv4_set_but_no_IPv4_address_found(self): + v6_ip = factory.make_ipv6_address() + self.patch_resolve_hostname([v6_ip]) + self.patch_get_maas_facing_server_host() + self.assertEqual( + unicode(v6_ip), + get_maas_facing_server_address(ipv4=True, ipv6=True)) + + def test__prefers_global_IPv6_over_link_local_IPv6(self): + global_ipv6 = factory.make_ipv6_address() + local_ipv6 = [ + 'fe80::%d:9876:5432:10' % randint(0, 9999) + for _ in range(5) + ] + self.patch_resolve_hostname([global_ipv6] + local_ipv6) + self.patch_get_maas_facing_server_host() + self.assertEqual( + unicode(global_ipv6), + get_maas_facing_server_address()) + + def test__fails_if_neither_ipv4_nor_ipv6_set(self): + self.patch_resolve_hostname() + self.patch_get_maas_facing_server_host() + self.assertRaises( + UnresolvableHost, + get_maas_facing_server_address, ipv4=False, ipv6=False) + + def test__raises_error_if_hostname_does_not_resolve(self): + self.patch_resolve_hostname([]) + self.patch_get_maas_facing_server_host() + self.assertRaises( + UnresolvableHost, + get_maas_facing_server_address) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_signals.py maas-1.7.6+bzr3376/src/maasserver/tests/test_signals.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_signals.py 2014-09-03 14:18:31.000000000 +0000 +++ 
maas-1.7.6+bzr3376/src/maasserver/tests/test_signals.py 2015-07-10 01:27:14.000000000 +0000 @@ -33,10 +33,10 @@ def test_connect_to_field_change_calls_callback(self): callback = Mock() connect_to_field_change(callback, FieldChangeTestModel, ['name1']) - old_name1_value = factory.getRandomString() + old_name1_value = factory.make_string() obj = FieldChangeTestModel(name1=old_name1_value) obj.save() - obj.name1 = factory.getRandomString() + obj.name1 = factory.make_string() obj.save() self.assertEqual( [call(obj, (old_name1_value,), deleted=False)], @@ -45,22 +45,22 @@ def test_connect_to_field_change_calls_callback_for_each_save(self): callback = Mock() connect_to_field_change(callback, FieldChangeTestModel, ['name1']) - old_name1_value = factory.getRandomString() + old_name1_value = factory.make_string() obj = FieldChangeTestModel(name1=old_name1_value) obj.save() - obj.name1 = factory.getRandomString() + obj.name1 = factory.make_string() obj.save() - obj.name1 = factory.getRandomString() + obj.name1 = factory.make_string() obj.save() self.assertEqual(2, callback.call_count) def test_connect_to_field_change_calls_callback_for_each_real_save(self): callback = Mock() connect_to_field_change(callback, FieldChangeTestModel, ['name1']) - old_name1_value = factory.getRandomString() + old_name1_value = factory.make_string() obj = FieldChangeTestModel(name1=old_name1_value) obj.save() - obj.name1 = factory.getRandomString() + obj.name1 = factory.make_string() obj.save() obj.save() self.assertEqual(1, callback.call_count) @@ -70,31 +70,31 @@ connect_to_field_change(callback1, FieldChangeTestModel, ['name1']) callback2 = Mock() connect_to_field_change(callback2, FieldChangeTestModel, ['name1']) - old_name1_value = factory.getRandomString() + old_name1_value = factory.make_string() obj = FieldChangeTestModel(name1=old_name1_value) obj.save() - obj.name1 = factory.getRandomString() + obj.name1 = factory.make_string() obj.save() self.assertEqual((1, 1), (callback1.call_count, 
callback2.call_count)) def test_connect_to_field_change_ignores_changes_to_other_fields(self): - obj = FieldChangeTestModel(name2=factory.getRandomString()) + obj = FieldChangeTestModel(name2=factory.make_string()) obj.save() callback = Mock() connect_to_field_change(callback, FieldChangeTestModel, ['name1']) - obj.name2 = factory.getRandomString() + obj.name2 = factory.make_string() obj.save() self.assertEqual(0, callback.call_count) def test_connect_to_field_change_ignores_object_creation(self): callback = Mock() connect_to_field_change(callback, FieldChangeTestModel, ['name1']) - obj = FieldChangeTestModel(name1=factory.getRandomString()) + obj = FieldChangeTestModel(name1=factory.make_string()) obj.save() self.assertEqual(0, callback.call_count) def test_connect_to_field_change_ignores_deletion_by_default(self): - obj = FieldChangeTestModel(name2=factory.getRandomString()) + obj = FieldChangeTestModel(name2=factory.make_string()) obj.save() callback = Mock() connect_to_field_change(callback, FieldChangeTestModel, ['name1']) @@ -102,7 +102,7 @@ self.assertEqual(0, callback.call_count) def test_connect_to_field_change_listens_to_deletion_if_delete_True(self): - old_name1_value = factory.getRandomString() + old_name1_value = factory.make_string() obj = FieldChangeTestModel(name1=old_name1_value) obj.save() callback = Mock() diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_start_up.py maas-1.7.6+bzr3376/src/maasserver/tests/test_start_up.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_start_up.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_start_up.py 2015-07-10 01:27:14.000000000 +0000 @@ -19,23 +19,25 @@ locks, start_up, ) -from maasserver.components import ( - discard_persistent_error, - register_persistent_error, - ) -from maasserver.enum import COMPONENT +from maasserver.bootresources import ensure_boot_source_definition +from maasserver.clusterrpc.testing.boot_images import make_rpc_boot_image from 
maasserver.models import ( - BootImage, + BootSource, + BootSourceSelection, NodeGroup, ) +from maasserver.testing.eventloop import RegionEventLoopFixture from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase -from maastesting.celery import CeleryFixture from maastesting.fakemethod import FakeMethod -from maastesting.matchers import MockCalledOnceWith -from mock import Mock -from provisioningserver import tasks -from testresources import FixtureResource +from maastesting.matchers import ( + MockCalledOnceWith, + MockNotCalled, + ) +from testtools.matchers import ( + Equals, + HasLength, + ) class LockChecker: @@ -51,42 +53,22 @@ class TestStartUp(MAASServerTestCase): - """Testing for the method `start_up`.""" + """Tests for the `start_up` function. - resources = ( - ('celery', FixtureResource(CeleryFixture())), - ) + The actual work happens in `inner_start_up` and `test_start_up`; the tests + you see here are for the locking wrapper only. 
+ """ + + def setUp(self): + super(TestStartUp, self).setUp() + self.useFixture(RegionEventLoopFixture()) + self.patch(start_up, 'create_gnupg_home') + self.patch(start_up, 'import_resources') def tearDown(self): super(TestStartUp, self).tearDown() - eventloop.stop().wait(5) - - def test_start_up_calls_write_full_dns_config(self): - recorder = FakeMethod() - self.patch(start_up, 'write_full_dns_config', recorder) - start_up.start_up() - self.assertEqual( - (1, [()]), - (recorder.call_count, recorder.extract_args())) - - def test_start_up_creates_master_nodegroup(self): - start_up.start_up() - self.assertEqual(1, NodeGroup.objects.all().count()) - - def test_start_up_refreshes_workers(self): - patched_handlers = tasks.refresh_functions.copy() - patched_handlers['nodegroup_uuid'] = Mock() - self.patch(tasks, 'refresh_functions', patched_handlers) - start_up.start_up() - self.assertThat( - patched_handlers['nodegroup_uuid'], - MockCalledOnceWith(NodeGroup.objects.ensure_master().uuid)) - - def test_start_up_refreshes_workers_outside_lock(self): - lock_checker = LockChecker() - self.patch(NodeGroup.objects, 'refresh_workers', lock_checker) - start_up.start_up() - self.assertEquals(False, lock_checker.lock_was_held) + # start_up starts the Twisted event loop, so we need to stop it. + eventloop.reset().wait(5) def test_start_up_runs_in_exclusion(self): lock_checker = LockChecker() @@ -95,45 +77,85 @@ self.assertEqual(1, lock_checker.call_count) self.assertEqual(True, lock_checker.lock_was_held) - def test_start_up_warns_about_missing_boot_images(self): - # If no boot images have been registered yet, that may mean that - # the import script has not been successfully run yet, or that - # the master worker is having trouble reporting its images. And - # so start_up registers a persistent warning about this. 
- BootImage.objects.all().delete() - discard_persistent_error(COMPONENT.IMPORT_PXE_FILES) - recorder = self.patch(start_up, 'register_persistent_error') - start_up.start_up() - - self.assertIn( - COMPONENT.IMPORT_PXE_FILES, - [args[0][0] for args in recorder.call_args_list]) - - def test_start_up_does_not_warn_if_boot_images_are_known(self): - # If boot images are known, there is no warning about the import - # script. - factory.make_boot_image() - recorder = self.patch(start_up, 'register_persistent_error') - - start_up.start_up() +class TestStartImportOnUpgrade(MAASServerTestCase): + """Tests for the `start_import_on_upgrade` function.""" - self.assertNotIn( - COMPONENT.IMPORT_PXE_FILES, - [args[0][0] for args in recorder.call_args_list]) - - def test_start_up_does_not_warn_if_already_warning(self): - # If there already is a warning about missing boot images, it is - # based on more precise knowledge of whether we ever heard from - # the region worker at all. It will not be replaced by a less - # knowledgeable warning. 
- BootImage.objects.all().delete() - register_persistent_error( - COMPONENT.IMPORT_PXE_FILES, factory.getRandomString()) - recorder = self.patch(start_up, 'register_persistent_error') + def setUp(self): + super(TestStartImportOnUpgrade, self).setUp() + ensure_boot_source_definition() + self.mock_import_resources = self.patch(start_up, 'import_resources') + + def test__does_nothing_if_boot_resources_exist(self): + mock_list_boot_images = self.patch(start_up, 'list_boot_images') + factory.make_BootResource() + start_up.start_import_on_upgrade() + self.assertThat(mock_list_boot_images, MockNotCalled()) + + def test__does_nothing_if_list_boot_images_is_empty(self): + self.patch(start_up, 'list_boot_images').return_value = [] + start_up.start_import_on_upgrade() + self.assertThat(self.mock_import_resources, MockNotCalled()) + + def test__calls_import_resources(self): + self.patch(start_up, 'list_boot_images').return_value = [ + make_rpc_boot_image(), + ] + start_up.start_import_on_upgrade() + self.assertThat(self.mock_import_resources, MockCalledOnceWith()) + + def test__sets_source_selections_based_on_boot_images(self): + boot_images = [ + make_rpc_boot_image() + for _ in range(3) + ] + self.patch(start_up, 'list_boot_images').return_value = boot_images + start_up.start_import_on_upgrade() + + boot_source = BootSource.objects.first() + for image in boot_images: + selection = BootSourceSelection.objects.get( + boot_source=boot_source, os=image["osystem"], + release=image["release"]) + self.assertIsNotNone(selection) + self.expectThat(selection.arches, Equals([image["architecture"]])) + self.expectThat(selection.subarches, Equals(["*"])) + self.expectThat(selection.labels, Equals([image["label"]])) + + +class TestInnerStartUp(MAASServerTestCase): + """Tests for the actual work done in `inner_start_up`.""" + + def setUp(self): + super(TestInnerStartUp, self).setUp() + self.patch(start_up, 'import_resources') + self.mock_create_gnupg_home = self.patch( + start_up, 
'create_gnupg_home') - start_up.start_up() + def test__calls_write_full_dns_config(self): + recorder = FakeMethod() + self.patch(start_up, 'write_full_dns_config', recorder) + start_up.inner_start_up() + self.assertEqual( + (1, [()]), + (recorder.call_count, recorder.extract_args())) - self.assertNotIn( - COMPONENT.IMPORT_PXE_FILES, - [args[0][0] for args in recorder.call_args_list]) + def test__creates_master_nodegroup(self): + start_up.inner_start_up() + clusters = NodeGroup.objects.all() + self.assertThat(clusters, HasLength(1)) + self.assertItemsEqual([NodeGroup.objects.ensure_master()], clusters) + + def test__calls_create_gnupg_home(self): + start_up.inner_start_up() + self.assertThat(self.mock_create_gnupg_home, MockCalledOnceWith()) + + def test__initialises_boot_source_config(self): + self.assertItemsEqual([], BootSource.objects.all()) + start_up.inner_start_up() + self.assertThat(BootSource.objects.all(), HasLength(1)) + + def test__calls_start_import_on_upgrade(self): + mock_start_import = self.patch(start_up, 'start_import_on_upgrade') + start_up.inner_start_up() + self.expectThat(mock_start_import, MockCalledOnceWith()) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_tasks.py maas-1.7.6+bzr3376/src/maasserver/tests/test_tasks.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_tasks.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_tasks.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -# Copyright 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for Celery tasks.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from fixtures import FakeLogger -from maasserver import tasks -from maasserver.enum import NODEGROUP_STATUS -from maasserver.models import NodeGroup -from maasserver.testing.factory import factory -from maastesting.celery import CeleryFixture -from maastesting.testcase import MAASTestCase -import mock -from testresources import FixtureResource -from testtools.matchers import Contains - - -class TestCleanupOldNonces(MAASTestCase): - - resources = ( - ("celery", FixtureResource(CeleryFixture())), - ) - - def test_cleanup_old_nonces_calls_cleanup_old_nonces(self): - logger = self.useFixture(FakeLogger('maasserver')) - nb_cleanups = 3 - fake = self.patch(tasks, 'nonces_cleanup') - fake.cleanup_old_nonces.return_value = nb_cleanups - tasks.cleanup_old_nonces() - self.assertEqual( - [mock.call()], - fake.cleanup_old_nonces.mock_calls) - message = "%d expired nonce(s) cleaned up." 
% nb_cleanups - self.assertThat(logger.output, Contains(message)) - - def test_import_boot_images_on_schedule_imports_images(self): - self.patch(NodeGroup, 'import_boot_images') - nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED) - tasks.import_boot_images_on_schedule() - self.assertEqual( - [mock.call()], - nodegroup.import_boot_images.mock_calls) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_third_party_drivers.py maas-1.7.6+bzr3376/src/maasserver/tests/test_third_party_drivers.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_third_party_drivers.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_third_party_drivers.py 2015-07-10 01:27:14.000000000 +0000 @@ -18,6 +18,7 @@ from maasserver import third_party_drivers from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase from maasserver.third_party_drivers import ( DriversConfig, get_third_party_driver, @@ -26,33 +27,33 @@ populate_kernel_opts, ) from maastesting import root -from maastesting.testcase import MAASTestCase +from metadataserver.enum import RESULT_TYPE from metadataserver.fields import Bin from metadataserver.models import ( commissioningscript, - NodeCommissionResult, + NodeResult, ) -class TestNodeModaliases(MAASTestCase): +class TestNodeModaliases(MAASServerTestCase): def test_uses_commissioning_modaliases(self): test_data = b'hulla\nbaloo' - node = factory.make_node() - NodeCommissionResult.objects.store_data( + node = factory.make_Node() + NodeResult.objects.store_data( node, commissioningscript.LIST_MODALIASES_OUTPUT_NAME, - 0, Bin(test_data)) + 0, RESULT_TYPE.COMMISSIONING, Bin(test_data)) aliases = node_modaliases(node) self.assertEqual(['hulla', 'baloo'], aliases) def test_survives_no_commissioning_data(self): - node = factory.make_node() + node = factory.make_Node() aliases = node_modaliases(node) self.assertEqual([], aliases) -class TestMatchAliasesToDriver(MAASTestCase): 
+class TestMatchAliasesToDriver(MAASServerTestCase): def test_finds_first_match(self): drivers = [ @@ -72,7 +73,7 @@ self.assertIsNone(driver) -class TestPopulateKernelOpts(MAASTestCase): +class TestPopulateKernelOpts(MAASServerTestCase): def test_blacklist_provided(self): driver = {'blacklist': 'bad'} @@ -85,10 +86,10 @@ self.assertNotIn('kernel_opts', driver) -class TestGetThirdPartyCode(MAASTestCase): +class TestGetThirdPartyCode(MAASServerTestCase): def test_finds_match(self): - node = factory.make_node() + node = factory.make_Node() mock = self.patch(third_party_drivers, 'match_aliases_to_driver') base_driver = dict(comment='hooray') mock.return_value = base_driver @@ -101,14 +102,14 @@ self.assertEqual('hooray', driver['comment']) def test_finds_no_match(self): - node = factory.make_node() + node = factory.make_Node() mock = self.patch(third_party_drivers, 'match_aliases_to_driver') mock.return_value = None driver = get_third_party_driver(node) self.assertEqual({}, driver) -class TestDriversConfig(MAASTestCase): +class TestDriversConfig(MAASServerTestCase): def test_get_defaults_returns_empty_drivers_list(self): observed = DriversConfig.get_defaults() diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_worker_user.py maas-1.7.6+bzr3376/src/maasserver/tests/test_worker_user.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_worker_user.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_worker_user.py 2015-07-10 01:27:14.000000000 +0000 @@ -25,7 +25,7 @@ class TestNodeGroupUser(MAASServerTestCase): - """Test the special "user" that celery workers use to access the API.""" + """Test the special "user" that workers use to access the API.""" def test_get_worker_user_always_returns_same_user(self): self.assertEqual(get_worker_user().id, get_worker_user().id) @@ -39,7 +39,3 @@ worker_user = get_worker_user() self.assertIn(worker_user.username, SYSTEM_USERS) self.assertRaises(UserProfile.DoesNotExist, worker_user.get_profile) 
- - def test_get_worker_user_caches_user(self): - get_worker_user() - self.assertNumQueries(0, get_worker_user) diff -Nru maas-1.5.4+bzr2294/src/maasserver/tests/test_x509.py maas-1.7.6+bzr3376/src/maasserver/tests/test_x509.py --- maas-1.5.4+bzr2294/src/maasserver/tests/test_x509.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/tests/test_x509.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,199 @@ +# Copyright 2014 Cloudbase Solutions SRL. +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `maasserver.x509`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import os + +from maasserver import x509 +from maasserver.x509 import ( + WinRMX509, + WinRMX509Error, + ) +from maastesting.factory import factory +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import MAASTestCase +import OpenSSL +from testtools.matchers import ( + FileContains, + FileExists, + ) + + +class TestWinRMX509(MAASTestCase): + + def configure_WinRMX509(self): + cert_name = factory.make_name('cert_name') + upn_name = factory.make_name('upn_name') + cert_dir = self.make_dir() + winrmx509 = WinRMX509( + cert_name=cert_name, upn_name=upn_name, cert_dir=cert_dir) + return winrmx509 + + def make_certificate(self): + winrmx509 = self.configure_WinRMX509() + _, cert = winrmx509.get_key_and_cert() + winrmx509.write_cert(cert) + return cert, winrmx509 + + def dump_certificate(self, cert): + return OpenSSL.crypto.dump_certificate( + OpenSSL.crypto.FILETYPE_PEM, cert) + + def make_privatekey(self): + winrmx509 = self.configure_WinRMX509() + key, _ = winrmx509.get_key_and_cert() + winrmx509.write_privatekey(key) + return key, winrmx509 + + def dump_privatekey(self, key): + return OpenSSL.crypto.dump_privatekey( + OpenSSL.crypto.FILETYPE_PEM, key) + + 
def make_cert_and_privatekey(self): + winrmx509 = self.configure_WinRMX509() + key, cert = winrmx509.get_key_and_cert() + winrmx509.write_cert(cert) + winrmx509.write_privatekey(key) + return key, cert, winrmx509 + + def test_create_cert_raises_error_on_file_already_exists(self): + cert, winrmx509 = self.make_certificate() + self.assertRaises(WinRMX509Error, winrmx509.create_cert) + + def test_create_cert_writes_cert(self): + winrmx509 = self.configure_WinRMX509() + winrmx509.create_cert() + self.assertThat(winrmx509.pem_file, FileExists()) + + def test_create_cert_writes_privatekey(self): + winrmx509 = self.configure_WinRMX509() + winrmx509.create_cert() + self.assertThat(winrmx509.key_file, FileExists()) + + def test_create_cert_exports_p12(self): + winrmx509 = self.configure_WinRMX509() + winrmx509.create_cert() + self.assertThat(winrmx509.pfx_file, FileExists()) + + def test_create_cert_raises_error_on_export_p12_error(self): + winrmx509 = self.configure_WinRMX509() + self.patch(winrmx509, 'export_p12').side_effect = OpenSSL.crypto.Error + self.assertRaises(WinRMX509Error, winrmx509.create_cert) + + def test_create_cert_calls_print_cert_details(self): + winrmx509 = self.configure_WinRMX509() + mock_print = self.patch(winrmx509, 'print_cert_details') + winrmx509.create_cert(print_cert=True) + self.assertThat(mock_print, MockCalledOnceWith(winrmx509.pem_file)) + + def test_get_key_and_cert_returns_rsa_key(self): + winrmx509 = self.configure_WinRMX509() + key, _ = winrmx509.get_key_and_cert() + self.assertEqual(OpenSSL.crypto.TYPE_RSA, key.type()) + + def test_get_key_and_cert_returns_key_of_correct_size(self): + winrmx509 = self.configure_WinRMX509() + key, _ = winrmx509.get_key_and_cert() + self.assertEqual(winrmx509.KEY_SIZE, key.bits()) + + def test_get_key_and_cert_returns_cert_with_upn_name(self): + winrmx509 = self.configure_WinRMX509() + _, cert = winrmx509.get_key_and_cert() + self.assertEqual(winrmx509.upn_name, cert.get_subject().CN) + + def 
test_get_key_and_cert_returns_cert_with_valid_serial_number(self): + winrmx509 = self.configure_WinRMX509() + _, cert = winrmx509.get_key_and_cert() + self.assertEqual(1000, cert.get_serial_number()) + + def test_get_key_and_cert_returns_cert_with_extensions(self): + winrmx509 = self.configure_WinRMX509() + _, cert = winrmx509.get_key_and_cert() + self.assertEqual(2, cert.get_extension_count()) + self.assertEqual( + 'subjectAltName', cert.get_extension(0).get_short_name()) + self.assertEqual( + 'extendedKeyUsage', cert.get_extension(1).get_short_name()) + + def test_get_key_and_cert_returns_cert_with_issuer_set_from_subject(self): + winrmx509 = self.configure_WinRMX509() + _, cert = winrmx509.get_key_and_cert() + self.assertEqual(cert.get_subject(), cert.get_issuer()) + + def test_get_cert_details(self): + cert, winrmx509 = self.make_certificate() + self.assertItemsEqual({ + 'subject': cert.get_subject().CN, + 'thumbprint': cert.digest('SHA1'), + 'contents': self.dump_certificate(cert), + }, winrmx509.get_cert_details(winrmx509.pem_file)) + + def test_write_privatekey(self): + key, winrmx509 = self.make_privatekey() + self.assertThat( + winrmx509.key_file, FileContains(self.dump_privatekey(key))) + + def test_write_cert(self): + cert, winrmx509 = self.make_certificate() + self.assertThat( + winrmx509.pem_file, FileContains(self.dump_certificate(cert))) + + def test_load_pem_file_returns_cert_and_contents(self): + cert, winrmx509 = self.make_certificate() + loaded_cert, contents = winrmx509.load_pem_file(winrmx509.pem_file) + self.assertEqual(self.dump_certificate(cert), contents) + self.assertEqual( + self.dump_certificate(cert), self.dump_certificate(loaded_cert)) + + def test_load_pem_file_raises_error_on_invalid_cert(self): + winrmx509 = self.configure_WinRMX509() + self.patch( + x509, 'read_text_file').return_value = factory.make_string() + self.assertRaises(WinRMX509Error, winrmx509.load_pem_file, 'file') + + def test_export_p12(self): + key, cert, winrmx509 = 
self.make_cert_and_privatekey() + passphrase = factory.make_name('password') + winrmx509.export_p12(key, cert, passphrase) + with open(winrmx509.pfx_file, 'rb') as stream: + p12_contents = stream.read() + p12 = OpenSSL.crypto.load_pkcs12(p12_contents, passphrase) + self.assertEqual( + self.dump_certificate(cert), + self.dump_certificate(p12.get_certificate())) + self.assertEqual( + self.dump_privatekey(key), + self.dump_privatekey(p12.get_privatekey())) + + def test_get_ssl_dir_calls_ensure_dir(self): + winrmx509 = self.configure_WinRMX509() + mock_ensure_dir = self.patch(x509, 'ensure_dir') + fake_dir = factory.make_name('dir') + winrmx509.get_ssl_dir(fake_dir) + self.assertThat(mock_ensure_dir, MockCalledOnceWith(fake_dir)) + + def test_get_ssl_dir_returns_home_ssl_dir(self): + winrmx509 = self.configure_WinRMX509() + self.patch(x509, 'ensure_dir') + self.assertEqual( + os.path.join(os.path.expanduser("~"), '.ssl'), + winrmx509.get_ssl_dir()) + + def test_generate_passphrase(self): + winrmx509 = self.configure_WinRMX509() + self.assertEqual( + winrmx509.PASSPHRASE_LENGTH, + len(winrmx509.generate_passphrase())) diff -Nru maas-1.5.4+bzr2294/src/maasserver/third_party_drivers.py maas-1.7.6+bzr3376/src/maasserver/third_party_drivers.py --- maas-1.5.4+bzr2294/src/maasserver/third_party_drivers.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/third_party_drivers.py 2015-07-10 01:27:14.000000000 +0000 @@ -123,7 +123,7 @@ def node_modaliases(node): """Return a list of modaliases from the node.""" name = commissioningscript.LIST_MODALIASES_OUTPUT_NAME - query = node.nodecommissionresult_set.filter(name__exact=name) + query = node.noderesult_set.filter(name__exact=name) if len(query) == 0: return [] diff -Nru maas-1.5.4+bzr2294/src/maasserver/urls_api.py maas-1.7.6+bzr3376/src/maasserver/urls_api.py --- maas-1.5.4+bzr2294/src/maasserver/urls_api.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/urls_api.py 2015-07-10 
01:27:14.000000000 +0000 @@ -18,49 +18,104 @@ patterns, url, ) -from maasserver.api import ( - AccountHandler, - api_doc, - BootImageHandler, - BootImagesHandler, - CommissioningResultsHandler, +from maasserver.api.account import AccountHandler +from maasserver.api.auth import api_auth +from maasserver.api.boot_images import BootImagesHandler +from maasserver.api.boot_resources import ( + BootResourceFileUploadHandler, + BootResourceHandler, + BootResourcesHandler, + ) +from maasserver.api.boot_source_selections import ( + BootSourceSelectionBackwardHandler, + BootSourceSelectionHandler, + BootSourceSelectionsBackwardHandler, + BootSourceSelectionsHandler, + ) +from maasserver.api.boot_sources import ( + BootSourceBackwardHandler, + BootSourceHandler, + BootSourcesBackwardHandler, + BootSourcesHandler, + ) +from maasserver.api.commissioning_scripts import ( CommissioningScriptHandler, CommissioningScriptsHandler, + ) +from maasserver.api.doc_handler import ( + api_doc, describe, + ) +from maasserver.api.files import ( FileHandler, FilesHandler, - MaasHandler, + ) +from maasserver.api.ip_addresses import IPAddressesHandler +from maasserver.api.license_keys import ( + LicenseKeyHandler, + LicenseKeysHandler, + ) +from maasserver.api.maas import MaasHandler +from maasserver.api.networks import ( NetworkHandler, NetworksHandler, - NodeGroupHandler, + ) +from maasserver.api.node_group_interfaces import ( NodeGroupInterfaceHandler, NodeGroupInterfacesHandler, + ) +from maasserver.api.node_groups import ( + NodeGroupHandler, NodeGroupsHandler, - NodeHandler, + ) +from maasserver.api.node_macs import ( NodeMacHandler, NodeMacsHandler, + ) +from maasserver.api.nodes import ( + NodeHandler, NodesHandler, - pxeconfig, + ) +from maasserver.api.pxeconfig import pxeconfig +from maasserver.api.results import NodeResultsHandler +from maasserver.api.ssh_keys import ( SSHKeyHandler, SSHKeysHandler, + ) +from maasserver.api.ssl_keys import ( + SSLKeyHandler, + SSLKeysHandler, + ) 
+from maasserver.api.support import ( + AdminRestrictedResource, + RestrictedResource, + ) +from maasserver.api.tags import ( TagHandler, TagsHandler, + ) +from maasserver.api.users import ( UserHandler, UsersHandler, - VersionHandler, + ) +from maasserver.api.version import VersionHandler +from maasserver.api.zones import ( ZoneHandler, ZonesHandler, ) -from maasserver.api_auth import api_auth -from maasserver.api_support import ( - AdminRestrictedResource, - RestrictedResource, - ) account_handler = RestrictedResource(AccountHandler, authentication=api_auth) +boot_resource_handler = RestrictedResource( + BootResourceHandler, authentication=api_auth) +boot_resource_file_upload_handler = RestrictedResource( + BootResourceFileUploadHandler, authentication=api_auth) +boot_resources_handler = RestrictedResource( + BootResourcesHandler, authentication=api_auth) files_handler = RestrictedResource(FilesHandler, authentication=api_auth) file_handler = RestrictedResource(FileHandler, authentication=api_auth) +ipaddresses_handler = RestrictedResource( + IPAddressesHandler, authentication=api_auth) network_handler = RestrictedResource(NetworkHandler, authentication=api_auth) networks_handler = RestrictedResource(NetworksHandler, authentication=api_auth) node_handler = RestrictedResource(NodeHandler, authentication=api_auth) @@ -78,15 +133,15 @@ NodeGroupInterfacesHandler, authentication=api_auth) boot_images_handler = RestrictedResource( BootImagesHandler, authentication=api_auth) -boot_image_handler = RestrictedResource( - BootImageHandler, authentication=api_auth) tag_handler = RestrictedResource(TagHandler, authentication=api_auth) tags_handler = RestrictedResource(TagsHandler, authentication=api_auth) version_handler = RestrictedResource(VersionHandler) -commissioning_results_handler = RestrictedResource( - CommissioningResultsHandler, authentication=api_auth) +node_results_handler = RestrictedResource( + NodeResultsHandler, authentication=api_auth) sshkey_handler = 
RestrictedResource(SSHKeyHandler, authentication=api_auth) sshkeys_handler = RestrictedResource(SSHKeysHandler, authentication=api_auth) +sslkey_handler = RestrictedResource(SSLKeyHandler, authentication=api_auth) +sslkeys_handler = RestrictedResource(SSLKeysHandler, authentication=api_auth) user_handler = RestrictedResource(UserHandler, authentication=api_auth) users_handler = RestrictedResource(UsersHandler, authentication=api_auth) zone_handler = RestrictedResource(ZoneHandler, authentication=api_auth) @@ -99,6 +154,26 @@ CommissioningScriptHandler, authentication=api_auth) commissioning_scripts_handler = AdminRestrictedResource( CommissioningScriptsHandler, authentication=api_auth) +boot_source_handler = AdminRestrictedResource( + BootSourceHandler, authentication=api_auth) +boot_sources_handler = AdminRestrictedResource( + BootSourcesHandler, authentication=api_auth) +boot_source_selection_handler = AdminRestrictedResource( + BootSourceSelectionHandler, authentication=api_auth) +boot_source_selections_handler = AdminRestrictedResource( + BootSourceSelectionsHandler, authentication=api_auth) +boot_source_backward_handler = AdminRestrictedResource( + BootSourceBackwardHandler, authentication=api_auth) +boot_sources_backward_handler = AdminRestrictedResource( + BootSourcesBackwardHandler, authentication=api_auth) +boot_source_selection_backward_handler = AdminRestrictedResource( + BootSourceSelectionBackwardHandler, authentication=api_auth) +boot_source_selections_backward_handler = AdminRestrictedResource( + BootSourceSelectionsBackwardHandler, authentication=api_auth) +license_key_handler = AdminRestrictedResource( + LicenseKeyHandler, authentication=api_auth) +license_keys_handler = AdminRestrictedResource( + LicenseKeysHandler, authentication=api_auth) # API URLs accessible to anonymous users. 
@@ -134,12 +209,10 @@ url(r'^nodegroups/$', nodegroups_handler, name='nodegroups_handler'), url(r'^nodegroups/(?P[^/]+)/interfaces/$', nodegroupinterfaces_handler, name='nodegroupinterfaces_handler'), - url(r'^nodegroups/(?P[^/]+)/interfaces/(?P[^/]+)/$', + url(r'^nodegroups/(?P[^/]+)/interfaces/(?P[^/]+)/$', nodegroupinterface_handler, name='nodegroupinterface_handler'), url(r'^nodegroups/(?P[^/]+)/boot-images/$', boot_images_handler, name='boot_images_handler'), - url(r'^nodegroups/(?P[^/]+)/boot-images/(?P[^/]+)/$', - boot_image_handler, name='boot_image_handler'), url( r'^networks/(?P[\w\-]+)/$', network_handler, name='network_handler'), @@ -148,6 +221,10 @@ url(r'^files/(?P.+)/$', file_handler, name='file_handler'), url(r'^account/$', account_handler, name='account_handler'), url( + r'^account/prefs/sslkeys/(?P[^/]+)/$', sslkey_handler, + name='sslkey_handler'), + url(r'^account/prefs/sslkeys/$', sslkeys_handler, name='sslkeys_handler'), + url( r'^account/prefs/sshkeys/(?P[^/]+)/$', sshkey_handler, name='sshkey_handler'), url(r'^account/prefs/sshkeys/$', sshkeys_handler, name='sshkeys_handler'), @@ -155,11 +232,25 @@ url(r'^tags/$', tags_handler, name='tags_handler'), url( r'^commissioning-results/$', - commissioning_results_handler, name='commissioning_results_handler'), + node_results_handler, name='node_results_handler'), + url( + r'^installation-results/$', + node_results_handler, name='node_results_handler'), url(r'^users/$', users_handler, name='users_handler'), url(r'^users/(?P[^/]+)/$', user_handler, name='user_handler'), url(r'^zones/(?P[^/]+)/$', zone_handler, name='zone_handler'), url(r'^zones/$', zones_handler, name='zones_handler'), + url(r'^ipaddresses/$', ipaddresses_handler, name='ipaddresses_handler'), + url( + r'^boot-resources/$', + boot_resources_handler, name='boot_resources_handler'), + url( + r'^boot-resources/(?P[^/]+)/$', + boot_resource_handler, name='boot_resource_handler'), + url( + r'^boot-resources/(?P[^/]+)/upload/(?P[^/]+)/$', + 
boot_resource_file_upload_handler, + name='boot_resource_file_upload_handler'), ) @@ -173,4 +264,31 @@ url( r'^commissioning-scripts/(?P[^/]+)$', commissioning_script_handler, name='commissioning_script_handler'), + url( + r'^license-keys/$', license_keys_handler, name='license_keys_handler'), + url( + r'^license-key/(?P[^/]+)/(?P[^/]+)$', + license_key_handler, name='license_key_handler'), + url(r'^boot-sources/$', + boot_sources_handler, name='boot_sources_handler'), + url(r'^boot-sources/(?P[^/]+)/$', + boot_source_handler, name='boot_source_handler'), + url(r'^boot-sources/(?P[^/]+)/selections/$', + boot_source_selections_handler, + name='boot_source_selections_handler'), + url(r'^boot-sources/(?P[^/]+)/selections/(?P[^/]+)/$', + boot_source_selection_handler, + name='boot_source_selection_handler'), + url(r'^nodegroups/(?P[^/]+)/boot-sources/$', + boot_sources_backward_handler, name='boot_sources_backward_handler'), + url(r'^nodegroups/(?P[^/]+)/boot-sources/(?P[^/]+)/$', + boot_source_backward_handler, name='boot_source_backward_handler'), + url(r'^nodegroups/(?P[^/]+)/boot-sources/(?P[^/]+)/' + 'selections/$', + boot_source_selections_backward_handler, + name='boot_source_selections_backward_handler'), + url(r'^nodegroups/(?P[^/]+)/boot-sources/(?P[^/]+)/' + 'selections/(?P[^/]+)/$', + boot_source_selection_backward_handler, + name='boot_source_selection_backward_handler'), ) diff -Nru maas-1.5.4+bzr2294/src/maasserver/urls.py maas-1.7.6+bzr3376/src/maasserver/urls.py --- maas-1.5.4+bzr2294/src/maasserver/urls.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/urls.py 2015-07-10 01:27:14.000000000 +0000 @@ -21,6 +21,10 @@ url, ) from django.contrib.auth.decorators import user_passes_test +from maasserver.bootresources import ( + simplestreams_file_handler, + simplestreams_stream_handler, + ) from maasserver.enum import NODEGROUP_STATUS from maasserver.models import Node from maasserver.views import TextTemplateView @@ -29,7 +33,6 @@ 
logout, ) from maasserver.views.clusters import ( - BootImagesListView, ClusterDelete, ClusterEdit, ClusterInterfaceCreate, @@ -37,6 +40,10 @@ ClusterInterfaceEdit, ClusterListView, ) +from maasserver.views.images import ( + ImageDeleteView, + ImagesView, + ) from maasserver.views.networks import ( NetworkAdd, NetworkDelete, @@ -44,9 +51,10 @@ NetworkListView, NetworkView, ) -from maasserver.views.nodecommissionresult import ( +from maasserver.views.noderesult import ( NodeCommissionResultListView, NodeCommissionResultView, + NodeInstallResultView, ) from maasserver.views.nodes import ( enlist_preseed_view, @@ -54,6 +62,7 @@ MacDelete, NodeDelete, NodeEdit, + NodeEventListView, NodeListView, NodePreseedView, NodeView, @@ -61,6 +70,8 @@ from maasserver.views.prefs import ( SSHKeyCreateView, SSHKeyDeleteView, + SSLKeyCreateView, + SSLKeyDeleteView, userprefsview, ) from maasserver.views.settings import ( @@ -74,6 +85,11 @@ CommissioningScriptCreate, CommissioningScriptDelete, ) +from maasserver.views.settings_license_keys import ( + LicenseKeyCreate, + LicenseKeyDelete, + LicenseKeyEdit, + ) from maasserver.views.tags import TagView from maasserver.views.zones import ( ZoneAdd, @@ -101,6 +117,13 @@ 'maasserver.views', url(r'^accounts/login/$', login, name='login'), url( + r'^images-stream/streams/v1/(?P.*)$', + simplestreams_stream_handler, name='simplestreams_stream_handler'), + url( + r'^images-stream/(?P.*)/(?P.*)/(?P.*)/' + '(?P.*)/(?P.*)/(?P.*)$', + simplestreams_file_handler, name='simplestreams_file_handler'), + url( r'^robots\.txt$', TextTemplateView.as_view( template_name='maasserver/robots.txt'), name='robots'), @@ -117,6 +140,20 @@ url( r'^account/prefs/sshkey/delete/(?P\d*)/$', SSHKeyDeleteView.as_view(), name='prefs-delete-sshkey'), + url( + r'^account/prefs/sslkey/add/$', SSLKeyCreateView.as_view(), + name='prefs-add-sslkey'), + url( + r'^account/prefs/sslkey/delete/(?P\d*)/$', + SSLKeyDeleteView.as_view(), name='prefs-delete-sslkey'), + url( + 
r'^commissioning-results/(?P[0-9]+)/$', + NodeCommissionResultView.as_view(), + name='nodecommissionresult-view'), + url( + r'^installation-results/(?P[0-9]+)/$', + NodeInstallResultView.as_view(), + name='nodeinstallresult-view'), ) # Logout view. urlpatterns += patterns( @@ -135,6 +172,9 @@ url(r'^nodes/enlist-preseed/$', enlist_preseed_view, name='enlist-preseed-view'), url( + r'^nodes/(?P[\w\-]+)/events/$', NodeEventListView.as_view(), + name='node-event-list-view'), + url( r'^nodes/(?P[\w\-]+)/view/$', NodeView.as_view(), name='node-view'), url( @@ -178,17 +218,14 @@ r'^clusters/(?P[\w\-]+)/delete/$', ClusterDelete.as_view(), name='cluster-delete'), adminurl( - r'^clusters/(?P[\w\-]+)/bootimages/$', - BootImagesListView.as_view(), name='cluster-bootimages-list'), - adminurl( r'^clusters/(?P[\w\-]+)/interfaces/add/$', ClusterInterfaceCreate.as_view(), name='cluster-interface-create'), adminurl( - r'^clusters/(?P[\w\-]+)/interfaces/(?P[^/]*)/' + r'^clusters/(?P[\w\-]+)/interfaces/(?P[^/]*)/' 'edit/$', ClusterInterfaceEdit.as_view(), name='cluster-interface-edit'), adminurl( - r'^clusters/(?P[\w\-]+)/interfaces/(?P[^/]*)/' + r'^clusters/(?P[\w\-]+)/interfaces/(?P[^/]*)/' 'delete/$', ClusterInterfaceDelete.as_view(), name='cluster-interface-delete'), # XXX: rvb 2012-10-08 bug=1063881: @@ -196,10 +233,10 @@ # can have an empty name, thus leading to urls containing the # pattern '//' that is then reduced by apache into '/'. 
adminurl( - r'^clusters/(?P[\w\-]+)/interfaces/(?P)' + r'^clusters/(?P[\w\-]+)/interfaces/(?P)' 'edit/$', ClusterInterfaceEdit.as_view()), adminurl( - r'^clusters/(?P[\w\-]+)/interfaces/(?P)' + r'^clusters/(?P[\w\-]+)/interfaces/(?P)' 'delete/$', ClusterInterfaceDelete.as_view()), # /XXX adminurl(r'^settings/$', settings, name='settings'), @@ -226,9 +263,17 @@ NodeCommissionResultListView.as_view(), name='nodecommissionresult-list'), adminurl( - r'^commissioning-results/(?P[0-9]+)/$', - NodeCommissionResultView.as_view(), - name='nodecommissionresult-view'), + r'^license-key/(?P[^/]+)/(?P[^/]+)/delete/$', + LicenseKeyDelete.as_view(), + name='license-key-delete'), + adminurl( + r'^license-key/(?P[^/]+)/(?P[^/]+)/edit/$', + LicenseKeyEdit.as_view(), + name='license-key-edit'), + adminurl( + r'^license-key/add/$', + LicenseKeyCreate.as_view(), + name='license-key-add'), ) # Tag views. @@ -237,6 +282,15 @@ url(r'^tags/(?P[\w\-]+)/view/$', TagView.as_view(), name='tag-view'), ) +# Image views. +urlpatterns += patterns( + 'maasserver.views', + url(r'^images/$', ImagesView.as_view(), name='images'), + url( + r'^images/(?P[\w\-]+)/delete/$', + ImageDeleteView.as_view(), name='image-delete'), +) + # Zone views. 
urlpatterns += patterns( 'maasserver.views', diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/async.py maas-1.7.6+bzr3376/src/maasserver/utils/async.py --- maas-1.5.4+bzr2294/src/maasserver/utils/async.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/async.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,12 +14,19 @@ __metaclass__ = type __all__ = [ "gather", + "transactional", ] +from functools import wraps from itertools import count from Queue import Queue from crochet import wait_for_reactor +from django.db import ( + close_old_connections, + connection, + transaction, + ) from maasserver.exceptions import IteratorReusedError from twisted.internet import reactor from twisted.internet.defer import maybeDeferred @@ -57,6 +64,13 @@ Issue calls into the reactor, passing results back to another thread. + Note that `gather` does not explicitly report to the caller that it + has timed-out; calls are silently cancelled and the generator simply + reaches its end. If this information is important to your code, put + in place some mechanism to check that all expected responses have + been received, or create a modified version of thus function with + the required behaviour. + :param calls: An iterable of no-argument callables to be called in the reactor thread. Each will be called via :py:func:`~twisted.internet.defer.maybeDeferred`. @@ -124,3 +138,21 @@ # Return an iterator to the invoking thread that will stop at the # first sign of the `done` sentinel. return UseOnceIterator(queue.get, done) + + +def transactional(func): + """Decorator that wraps calls to `func` in a Django-managed transaction. + + It also ensures that connections are closed if necessary. This keeps + Django happy, especially in the test suite. + """ + @wraps(func) + def call_within_transaction(*args, **kwargs): + try: + with transaction.atomic(): + return func(*args, **kwargs) + finally: + # Close connections if we've left the outer-most atomic block. 
+ if not connection.in_atomic_block: + close_old_connections() + return call_within_transaction diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/converters.py maas-1.7.6+bzr3376/src/maasserver/utils/converters.py --- maas-1.5.4+bzr2294/src/maasserver/utils/converters.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/converters.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,57 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Conversion utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'XMLToYAML', + ] + +from lxml import etree + + +class XMLToYAML: + """Convert XML to YAML.""" + + def __init__(self, text): + self.text = text + self.new_text = '' + self.level = 0 + self.indent_spaces = 2 + + def spaces(self): + return self.level * self.indent_spaces * ' ' + + def addText(self, element): + if '{' in element.tag: + new_tag = element.tag.strip('{').replace('}', ':') + self.new_text += "%s- %s:\n" % (self.spaces(), new_tag) + else: + self.new_text += "%s- %s:\n" % (self.spaces(), element.tag) + self.level += 1 + for key in element.keys(): + self.new_text += "%s%s: %s\n" % ( + self.spaces(), key, element.attrib[key]) + + def recurseElement(self, element): + for child in element.iterchildren(): + self.addText(child) + if child.text is not None and not child.text.isspace(): + self.new_text += "%s%s\n" % (self.spaces(), child.text.strip()) + self.recurseElement(child) + self.level -= 1 + + def convert(self): + root = etree.fromstring(self.text) + self.addText(root) + self.recurseElement(root) + return self.new_text diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/dblocks.py maas-1.7.6+bzr3376/src/maasserver/utils/dblocks.py --- maas-1.5.4+bzr2294/src/maasserver/utils/dblocks.py 2014-09-03 14:18:31.000000000 +0000 +++ 
maas-1.7.6+bzr3376/src/maasserver/utils/dblocks.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,6 +14,7 @@ __metaclass__ = type __all__ = [ "DatabaseLock", + "DatabaseXactLock", "DatabaseLockAttemptOutsideTransaction", "DatabaseLockNotHeld", ] @@ -41,7 +42,7 @@ """A particular lock was not held.""" -class DatabaseLock(tuple): +class DatabaseLockBase(tuple): """An advisory lock held in the database. Implemented using PostgreSQL's advisory locking functions. @@ -70,19 +71,13 @@ objid = property(itemgetter(1)) def __new__(cls, objid): - return super(cls, DatabaseLock).__new__(cls, (classid, objid)) + return super(DatabaseLockBase, cls).__new__(cls, (classid, objid)) def __enter__(self): - if not connection.in_atomic_block: - raise DatabaseLockAttemptOutsideTransaction(self) - with closing(connection.cursor()) as cursor: - cursor.execute("SELECT pg_advisory_lock(%s, %s)", self) + raise NotImplementedError() def __exit__(self, *exc_info): - with closing(connection.cursor()) as cursor: - cursor.execute("SELECT pg_advisory_unlock(%s, %s)", self) - if cursor.fetchone() != (True,): - raise DatabaseLockNotHeld(self) + raise NotImplementedError() def __repr__(self): return b"<%s classid=%d objid=%d>" % ( @@ -106,3 +101,62 @@ with closing(connection.cursor()) as cursor: cursor.execute(stmt, self) return len(cursor.fetchall()) >= 1 + + +def in_transaction(): + """Are we in a transaction?""" + return ( + connection.in_atomic_block or + len(connection.transaction_state) > 0) + + +class DatabaseLock(DatabaseLockBase): + """An advisory lock obtained with ``pg_advisory_lock``. + + Use this to obtain an exclusive lock on an external, shared, resource. + Avoid using this to obtain a lock for a database modification because this + lock must be released before the transaction is committed. + + In most cases you should prefer :py:class:`DatabaseXactLock` instead. + + See :py:class:`DatabaseLockBase`. 
+ """ + + __slots__ = () + + def __enter__(self): + if not in_transaction(): + raise DatabaseLockAttemptOutsideTransaction(self) + with closing(connection.cursor()) as cursor: + cursor.execute("SELECT pg_advisory_lock(%s, %s)", self) + + def __exit__(self, *exc_info): + with closing(connection.cursor()) as cursor: + cursor.execute("SELECT pg_advisory_unlock(%s, %s)", self) + if cursor.fetchone() != (True,): + raise DatabaseLockNotHeld(self) + + +class DatabaseXactLock(DatabaseLockBase): + """An advisory lock obtained with ``pg_advisory_xact_lock``. + + Use this to obtain an exclusive lock for a modification to the database. + It can be used to synchronise access to an external resource too, but the + point of release is less explicit because it's outside of the control of + this class: the lock is only released when the transaction in which it was + obtained is committed or aborted. + + See :py:class:`DatabaseLockBase`. + """ + + __slots__ = () + + def __enter__(self): + """Obtain lock using pg_advisory_xact_lock().""" + if not in_transaction(): + raise DatabaseLockAttemptOutsideTransaction(self) + with closing(connection.cursor()) as cursor: + cursor.execute("SELECT pg_advisory_xact_lock(%s, %s)", self) + + def __exit__(self, *exc_info): + """Do nothing: this lock can only be released by the transaction.""" diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/__init__.py maas-1.7.6+bzr3376/src/maasserver/utils/__init__.py --- maas-1.5.4+bzr2294/src/maasserver/utils/__init__.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/__init__.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Utilities.""" @@ -19,7 +19,7 @@ 'get_db_state', 'get_local_cluster_UUID', 'ignore_unused', - 'map_enum', + 'make_validation_error_message', 'strip_domain', 'synchronised', ] @@ -35,6 +35,7 @@ from maasserver.enum import NODEGROUPINTERFACE_MANAGEMENT from maasserver.exceptions import NodeGroupMisconfiguration from maasserver.utils.orm import get_one +from provisioningserver.utils.text import make_bullet_list def get_db_state(instance, field_name): @@ -61,20 +62,6 @@ """ -def map_enum(enum_class): - """Map out an enumeration class as a "NAME: value" dict.""" - # Filter out anything that starts with '_', which covers private and - # special methods. We can make this smarter later if we start using - # a smarter enumeration base class etc. Or if we switch to a proper - # enum mechanism, this function will act as a marker for pieces of - # code that should be updated. - return { - key: value - for key, value in vars(enum_class).items() - if not key.startswith('_') - } - - def absolute_reverse(view_name, query=None, base_url=None, *args, **kwargs): """Return the absolute URL (i.e. including the URL scheme specifier and the network location of the MAAS server). Internally this method simply @@ -152,47 +139,44 @@ ip_address = request.META['REMOTE_ADDR'] if ip_address is None: return None - else: - # Fetch nodegroups with interfaces in the requester's network, - # preferring those with managed networks first. The `NodeGroup` - # objects returned are annotated with the `management` field of the - # matching `NodeGroupInterface`. See https://docs.djangoproject.com - # /en/dev/topics/db/sql/#adding-annotations for this curious feature - # of Django's ORM. 
- query = NodeGroup.objects.raw(""" - SELECT - ng.*, - ngi.management - FROM - maasserver_nodegroup AS ng, - maasserver_nodegroupinterface AS ngi - WHERE - ng.id = ngi.nodegroup_id - AND - (inet %s & ngi.subnet_mask) = (ngi.ip & ngi.subnet_mask) - ORDER BY - ngi.management DESC, - ng.id ASC - """, [ip_address]) - nodegroups = list(query) - if len(nodegroups) == 0: - return None - elif len(nodegroups) == 1: - return nodegroups[0] - else: - # There are multiple matching nodegroups. Only zero or one may - # have a managed interface, otherwise it is a misconfiguration. - unmanaged = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED - nodegroups_with_managed_interfaces = { - nodegroup.id for nodegroup in nodegroups - if nodegroup.management != unmanaged - } - if len(nodegroups_with_managed_interfaces) > 1: - raise NodeGroupMisconfiguration( - "Multiple clusters on the same network; only " - "one cluster may manage the network of which " - "%s is a member." % ip_address) - return nodegroups[0] + + # Fetch nodegroups with interfaces in the requester's network, + # preferring those with managed networks first. The `NodeGroup` + # objects returned are annotated with the `management` field of the + # matching `NodeGroupInterface`. See https://docs.djangoproject.com + # /en/dev/topics/db/sql/#adding-annotations for this curious feature + # of Django's ORM. + query = NodeGroup.objects.raw(""" + SELECT + ng.*, + ngi.management + FROM maasserver_nodegroup AS ng + JOIN maasserver_nodegroupinterface AS ngi ON ng.id = ngi.nodegroup_id + WHERE + inet %s BETWEEN + (ngi.ip & ngi.subnet_mask) AND + (ngi.ip | ~ngi.subnet_mask) + ORDER BY ngi.management DESC, ng.id ASC + """, [ip_address]) + nodegroups = list(query) + if len(nodegroups) == 0: + return None + if len(nodegroups) == 1: + return nodegroups[0] + + # There are multiple matching nodegroups. Only zero or one may + # have a managed interface, otherwise it is a misconfiguration. 
+ unmanaged = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED + nodegroups_with_managed_interfaces = { + nodegroup.id for nodegroup in nodegroups + if nodegroup.management != unmanaged + } + if len(nodegroups_with_managed_interfaces) > 1: + raise NodeGroupMisconfiguration( + "Multiple clusters on the same network; only " + "one cluster may manage the network of which " + "%s is a member." % ip_address) + return nodegroups[0] def synchronised(lock): @@ -209,3 +193,24 @@ return func(*args, **kwargs) return call_with_lock return synchronise + + +def gen_validation_error_messages(error): + """Return massaged messages from a :py:class:`ValidationError`.""" + message_dict = error.message_dict + for field in sorted(message_dict): + field_messages = message_dict[field] + if field == "__all__": + for field_message in field_messages: + yield field_message + else: + for field_message in field_messages: + yield "%s: %s" % (field, field_message) + + +def make_validation_error_message(error): + """Return a massaged message from a :py:class:`ValidationError`. + + The message takes the form of a textual bullet-list. + """ + return make_bullet_list(gen_validation_error_messages(error)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/interfaces.py maas-1.7.6+bzr3376/src/maasserver/utils/interfaces.py --- maas-1.5.4+bzr2294/src/maasserver/utils/interfaces.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/interfaces.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,58 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Utilities related to network and cluster interfaces.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'get_name_and_vlan_from_cluster_interface', + 'make_name_from_interface', + ] + +from random import randint +import re + + +def make_name_from_interface(interface): + """Generate a cluster interface name based on a network interface name. + + The name is used as an identifier in API URLs, so awkward characters are + not allowed: whitespace, colons, etc. If the interface name had any such + characters in it, they are replaced with a double dash (`--`). + + If `interface` is `None`, or empty, a name will be made up. + """ + if interface is None or interface == u'': + base_name = u'unnamed-%d' % randint(1000000, 9999999) + else: + base_name = interface + return re.sub(u'[^\w:.-]', '--', base_name) + + +def get_name_and_vlan_from_cluster_interface(cluster_name, interface): + """Return a name suitable for a `Network` managed by a cluster interface. + + :param interface: Network interface name, e.g. `eth0:1`. + :param cluster_name: Name of the cluster. + :return: a tuple of the new name and the interface's VLAN tag. The VLAN + tag may be None. + """ + name = interface + vlan_tag = None + if '.' in name: + _, vlan_tag = name.split('.', 1) + if ':' in vlan_tag: + # Nasty: there's an alias after the VLAN tag. 
+ vlan_tag, _ = vlan_tag.split(':', 1) + name = name.replace('.', '-') + name = name.replace(':', '-') + network_name = "-".join((cluster_name, name)) + return network_name, vlan_tag diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/jsenums.py maas-1.7.6+bzr3376/src/maasserver/utils/jsenums.py --- maas-1.5.4+bzr2294/src/maasserver/utils/jsenums.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/jsenums.py 2015-07-10 01:27:14.000000000 +0000 @@ -34,8 +34,6 @@ import sys from textwrap import dedent -from maasserver.utils import map_enum - # Header. Will be written on top of the output. header = dedent("""\ /* @@ -80,6 +78,24 @@ return get_enum_classes(namespace) +# This method is duplicated from provisioningserver/utils/enum.py +# because jsenums is used by the packaging to build the JS file and +# we don't want to force the packaging to require all the dependencies +# that using provisioningserver/utils/enum.py would imply. +def map_enum(enum_class): + """Map out an enumeration class as a "NAME: value" dict.""" + # Filter out anything that starts with '_', which covers private and + # special methods. We can make this smarter later if we start using + # a smarter enumeration base class etc. Or if we switch to a proper + # enum mechanism, this function will act as a marker for pieces of + # code that should be updated. + return { + key: value + for key, value in vars(enum_class).items() + if not key.startswith('_') + } + + def serialize_enum(enum): """Represent a MAAS enum class in JavaScript.""" definitions = json.dumps(map_enum(enum), indent=4, sort_keys=True) diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/mac.py maas-1.7.6+bzr3376/src/maasserver/utils/mac.py --- maas-1.5.4+bzr2294/src/maasserver/utils/mac.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/mac.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,31 @@ +# Copyright 2014 Canonical Ltd. 
This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""MAC-related utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'get_vendor_for_mac', + ] + +from netaddr import ( + EUI, + NotRegisteredError, + ) + + +def get_vendor_for_mac(mac): + """Return vendor for MAC.""" + data = EUI(mac) + try: + return data.oui.registration().org + except NotRegisteredError: + return 'Unknown Vendor' diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/network.py maas-1.7.6+bzr3376/src/maasserver/utils/network.py --- maas-1.5.4+bzr2294/src/maasserver/utils/network.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/network.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Generic helpers for `netaddr` and network-related types.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - 'make_network', - ] - - -from netaddr import IPNetwork - - -def make_network(ip_address, netmask_or_bits, **kwargs): - """Construct an `IPNetwork` with the given address and netmask or width. - - This is a thin wrapper for the `IPNetwork` constructor. It's here because - the constructor for `IPNetwork` is easy to get wrong. If you pass it an - IP address and a netmask, or an IP address and a bit size, it will seem to - work... but it will pick a default netmask, not the one you specified. - - :param ip_address: - :param netmask_or_bits: - :param kwargs: Any other (keyword) arguments you want to pass to the - `IPNetwork` constructor. - :raise netaddr.core.AddrFormatError: If the network specification is - malformed. 
- :return: An `IPNetwork` of the given base address and netmask or bit width. - """ - return IPNetwork("%s/%s" % (ip_address, netmask_or_bits), **kwargs) diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/osystems.py maas-1.7.6+bzr3376/src/maasserver/utils/osystems.py --- maas-1.5.4+bzr2294/src/maasserver/utils/osystems.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/osystems.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,178 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). +"""Utilities for working with operating systems.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'get_distro_series_initial', + 'get_release_requires_key', + 'list_all_releases_requiring_keys', + 'list_all_usable_osystems', + 'list_all_usable_releases', + 'list_osystem_choices', + 'list_release_choices', + 'list_commissioning_choices', + ] + + +from operator import itemgetter + +from maasserver.clusterrpc.osystems import gen_all_known_operating_systems + + +def list_all_usable_osystems(): + """Return all operating systems that can be used for nodes.""" + osystems = [ + osystem + for osystem in gen_all_known_operating_systems() + if len(osystem['releases']) > 0 + ] + return sorted(osystems, key=itemgetter('title')) + + +def list_osystem_choices(osystems, include_default=True): + """Return Django "choices" list for `osystem`. + + :param include_default: When true includes the 'Default OS' in choice + selection. 
+ """ + if include_default: + choices = [('', 'Default OS')] + else: + choices = [] + choices += [ + (osystem['name'], osystem['title']) + for osystem in osystems + ] + return choices + + +def list_all_usable_releases(osystems): + """Return dictionary of usable `releases` for each operating system.""" + distro_series = {} + for osystem in osystems: + distro_series[osystem['name']] = sorted( + [release for release in osystem['releases']], + key=itemgetter('title')) + return distro_series + + +def list_all_releases_requiring_keys(osystems): + """Return dictionary of OS name mapping to `releases` that require + license keys.""" + distro_series = {} + for osystem in osystems: + releases = [ + release + for release in osystem['releases'] + if release['requires_license_key'] + ] + if len(releases) > 0: + distro_series[osystem['name']] = sorted( + releases, key=itemgetter('title')) + return distro_series + + +def get_release_requires_key(release): + """Return asterisk for any release that requires + a license key. + + This is used by the JS, to display the licese_key field. + """ + if release['requires_license_key']: + return '*' + return '' + + +def list_release_choices(releases, include_default=True, + with_key_required=True): + """Return Django "choices" list for `releases`. + + :param include_default: When true includes the 'Default OS Release' in + choice selection. + :param with_key_required: When true includes the release_requires_key in + the choice. 
+ """ + if include_default: + choices = [('', 'Default OS Release')] + else: + choices = [] + for os_name, os_releases in releases.items(): + for release in os_releases: + if with_key_required: + requires_key = get_release_requires_key(release) + else: + requires_key = '' + choices.append(( + '%s/%s%s' % (os_name, release['name'], requires_key), + release['title'] + )) + return choices + + +def get_osystem_from_osystems(osystems, name): + """Return osystem from osystems with the given name.""" + for osystem in osystems: + if osystem['name'] == name: + return osystem + return None + + +def get_release_from_osystem(osystem, name): + """Return release from osystem with the given release name.""" + for release in osystem['releases']: + if release['name'] == name: + return release + return None + + +def get_distro_series_initial(osystems, instance, with_key_required=True): + """Returns the distro_series initial value for the instance. + + :param with_key_required: When true includes the release_requires_key in + the choice. + """ + osystem_name = instance.osystem + series = instance.distro_series + osystem = get_osystem_from_osystems(osystems, osystem_name) + if not with_key_required: + key_required = '' + elif osystem is not None: + release = get_release_from_osystem(osystem, series) + if release is not None: + key_required = get_release_requires_key(release) + else: + key_required = '' + else: + # OS of the instance isn't part of the given OSes list so we can't + # figure out if the key is required or not, default to not requiring + # it. 
+ key_required = '' + if osystem_name is not None and osystem_name != '': + if series is None: + series = '' + return '%s/%s%s' % (osystem_name, series, key_required) + return None + + +def list_commissioning_choices(osystems): + """Return Django "choices" list for releases that can be used for + commissioning.""" + ubuntu = get_osystem_from_osystems(osystems, 'ubuntu') + if ubuntu is None: + return [] + else: + releases = sorted(ubuntu['releases'], key=itemgetter('title')) + return [ + (release['name'], release['title']) + for release in releases + if release['can_commission'] + ] diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_async.py maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_async.py --- maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_async.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_async.py 2015-07-10 01:27:14.000000000 +0000 @@ -18,10 +18,18 @@ from time import time import crochet +from django.db import connection from maasserver.exceptions import IteratorReusedError from maasserver.utils import async +from maastesting.matchers import ( + MockCalledOnceWith, + MockNotCalled, + ) from maastesting.testcase import MAASTestCase -from mock import sentinel +from mock import ( + Mock, + sentinel, + ) from testtools.matchers import ( Contains, Equals, @@ -100,7 +108,7 @@ class TestUseOnceIterator(MAASTestCase): def test_returns_correct_items_for_list(self): - expected_values = [i for i in range(10)] + expected_values = list(range(10)) iterator = async.UseOnceIterator(expected_values) actual_values = [val for val in iterator] self.assertEqual(expected_values, actual_values) @@ -113,5 +121,58 @@ iterator = async.UseOnceIterator([]) # Loop over the iterator to get to the point where we might try # and reuse it. 
- [i for i in iterator] + list(iterator) self.assertRaises(IteratorReusedError, iterator.next) + + +class TestTransactional(MAASTestCase): + + def test__calls_function_within_transaction_then_closes_connections(self): + close_old_connections = self.patch(async, "close_old_connections") + + # No transaction has been entered (what Django calls an atomic + # block), and old connections have not been closed. + self.assertFalse(connection.in_atomic_block) + self.assertThat(close_old_connections, MockNotCalled()) + + def check_inner(*args, **kwargs): + # In here, the transaction (`atomic`) has been started but + # is not over, and old connections have not yet been closed. + self.assertTrue(connection.in_atomic_block) + self.assertThat(close_old_connections, MockNotCalled()) + + function = Mock() + function.__name__ = self.getUniqueString() + function.side_effect = check_inner + + # Call `function` via the `transactional` decorator. + decorated_function = async.transactional(function) + decorated_function(sentinel.arg, kwarg=sentinel.kwarg) + + # `function` was called -- and therefore `check_inner` too -- + # and the arguments passed correctly. + self.assertThat(function, MockCalledOnceWith( + sentinel.arg, kwarg=sentinel.kwarg)) + + # After the decorated function has returned the transaction has + # been exited, and old connections have been closed. + self.assertFalse(connection.in_atomic_block) + self.assertThat(close_old_connections, MockCalledOnceWith()) + + def test__closes_connections_only_when_leaving_atomic_block(self): + close_old_connections = self.patch(async, "close_old_connections") + + @async.transactional + def inner(): + # We're inside a `transactional` context here. + return "inner" + + @async.transactional + def outer(): + # We're inside a `transactional` context here too. + # Call `inner`, thus nesting `transactional` contexts. + return "outer > " + inner() + + self.assertEqual("outer > inner", outer()) + # Old connections have been closed only once. 
+ self.assertThat(close_old_connections, MockCalledOnceWith()) diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_converters.py maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_converters.py --- maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_converters.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_converters.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,43 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for converters utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from textwrap import dedent + +from maasserver.utils.converters import XMLToYAML +from maastesting.testcase import MAASTestCase + + +class TestXMLToYAML(MAASTestCase): + + def test_xml_to_yaml_converts_xml(self): + # This test is similar to the test above but this one + # checks that tags with colons work as expected. 
+ xml = """ + + + Some Content + + """ + expected_result = dedent("""\ + - list: + - lldp:lldp: + label: LLDP neighbors + - lshw:list: + Some Content + """) + yml = XMLToYAML(xml) + self.assertEqual( + yml.convert(), expected_result) diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_dblocks.py maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_dblocks.py --- maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_dblocks.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_dblocks.py 2015-07-10 01:27:14.000000000 +0000 @@ -86,3 +86,66 @@ self.assertEqual( "" % lock, repr(lock)) + + +class TestDatabaseXactLock(MAASTestCase): + + def test_create_lock(self): + objid = self.getUniqueInteger() + lock = dblocks.DatabaseXactLock(objid) + self.assertEqual(lock, (dblocks.classid, objid)) + + def test_properties(self): + lock = dblocks.DatabaseXactLock(self.getUniqueInteger()) + self.assertEqual(lock, (lock.classid, lock.objid)) + + def test_lock_actually_locked(self): + objid = self.getUniqueInteger() + lock = dblocks.DatabaseXactLock(objid) + + with transaction.atomic(): + locks_held_before = get_locks() + with lock: + locks_held = get_locks() + locks_held_after = get_locks() + locks_held_after_txn = get_locks() + + locks_obtained = locks_held - locks_held_before + self.assertEqual({objid}, locks_obtained) + + locks_released = locks_held - locks_held_after + self.assertEqual(set(), locks_released) + + locks_released_with_txn = locks_held - locks_held_after_txn + self.assertEqual({objid}, locks_released_with_txn) + + def test_is_locked(self): + objid = self.getUniqueInteger() + lock = dblocks.DatabaseXactLock(objid) + + with transaction.atomic(): + self.assertFalse(lock.is_locked()) + with lock: + self.assertTrue(lock.is_locked()) + self.assertTrue(lock.is_locked()) + + # The lock is released with the end of the transaction. 
+ self.assertFalse(lock.is_locked()) + + def test_obtaining_lock_fails_when_outside_of_transaction(self): + objid = self.getUniqueInteger() + lock = dblocks.DatabaseXactLock(objid) + self.assertRaises( + dblocks.DatabaseLockAttemptOutsideTransaction, + lock.__enter__) + + def test_releasing_lock_does_nothing(self): + objid = self.getUniqueInteger() + lock = dblocks.DatabaseXactLock(objid) + self.assertIsNone(lock.__exit__()) + + def test_repr(self): + lock = dblocks.DatabaseXactLock(self.getUniqueInteger()) + self.assertEqual( + "" % lock, + repr(lock)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_interfaces.py maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_interfaces.py --- maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_interfaces.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_interfaces.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,113 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for network/cluster interface helpers.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from random import randint + +from maasserver.utils.interfaces import ( + get_name_and_vlan_from_cluster_interface, + make_name_from_interface, + ) +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase + + +class TestMakeNameFromInterface(MAASTestCase): + """Tests for `make_name_from_interface`.""" + + def test__passes_name_unchanged(self): + name = factory.make_name('itf9:2') + self.assertEqual(name, make_name_from_interface(name)) + + def test__escapes_weird_characters(self): + self.assertEqual('x--y', make_name_from_interface('x?y')) + self.assertEqual('x--y', make_name_from_interface('x y')) + + def test__makes_up_name_if_no_interface_given(self): + self.assertNotIn(make_name_from_interface(None), (None, '')) + self.assertNotIn(make_name_from_interface(''), (None, '')) + + def test__makes_up_unique_name_if_no_interface_given(self): + self.assertNotEqual( + make_name_from_interface(''), + make_name_from_interface('')) + + +class TestGetNameAndVlanFromClusterInterface(MAASTestCase): + """Tests for `get_name_and_vlan_from_cluster_interface`.""" + + def make_interface(self): + """Return a simple network interface name.""" + return 'eth%d' % randint(0, 99) + + def test_returns_simple_name_unaltered(self): + cluster = factory.make_name('cluster') + interface = factory.make_name('iface') + expected_name = '%s-%s' % (cluster, interface) + self.assertEqual( + (expected_name, None), + get_name_and_vlan_from_cluster_interface(cluster, interface)) + + def test_substitutes_colon(self): + cluster = factory.make_name('cluster') + base_interface = self.make_interface() + alias = randint(0, 99) + interface = '%s:%d' % (base_interface, alias) + expected_name = '%s-%s-%d' % (cluster, base_interface, alias) + self.assertEqual( + (expected_name, None), + 
get_name_and_vlan_from_cluster_interface(cluster, interface)) + + def test_returns_with_vlan_tag(self): + cluster = factory.make_name('cluster') + base_interface = self.make_interface() + vlan_tag = factory.make_vlan_tag() + interface = '%s.%d' % (base_interface, vlan_tag) + expected_name = '%s-%s-%d' % (cluster, base_interface, vlan_tag) + self.assertEqual( + (expected_name, '%d' % vlan_tag), + get_name_and_vlan_from_cluster_interface(cluster, interface)) + + def test_returns_name_with_alias_and_vlan_tag(self): + cluster = factory.make_name('cluster') + base_interface = self.make_interface() + vlan_tag = factory.make_vlan_tag() + alias = randint(0, 99) + interface = '%s:%d.%d' % (base_interface, alias, vlan_tag) + expected_name = '%s-%s-%d-%d' % ( + cluster, + base_interface, + alias, + vlan_tag, + ) + self.assertEqual( + (expected_name, '%d' % vlan_tag), + get_name_and_vlan_from_cluster_interface(cluster, interface)) + + def test_returns_name_with_vlan_tag_and_alias(self): + cluster = factory.make_name('cluster') + base_interface = self.make_interface() + vlan_tag = factory.make_vlan_tag() + alias = randint(0, 99) + interface = '%s.%d:%d' % (base_interface, vlan_tag, alias) + expected_name = '%s-%s-%d-%d' % ( + cluster, + base_interface, + vlan_tag, + alias, + ) + self.assertEqual( + (expected_name, '%d' % vlan_tag), + get_name_and_vlan_from_cluster_interface(cluster, interface)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_jsenums.py maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_jsenums.py --- maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_jsenums.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_jsenums.py 2015-07-10 01:27:14.000000000 +0000 @@ -16,7 +16,6 @@ from inspect import getsourcefile -from maasserver.utils import map_enum from maasserver.utils.jsenums import ( dump, footer, @@ -25,6 +24,7 @@ serialize_enum, ) from maastesting.testcase import MAASTestCase +from 
provisioningserver.utils.enum import map_enum class ENUM: diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_mac.py maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_mac.py --- maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_mac.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_mac.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,32 @@ +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Test MAC utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + + +from maasserver.utils.mac import get_vendor_for_mac +from maastesting.testcase import MAASTestCase + + +class TestGetVendorForMac(MAASTestCase): + + def test_get_vendor_for_mac_returns_vendor(self): + self.assertEqual( + "ELITEGROUP COMPUTER SYSTEMS CO., LTD.", + get_vendor_for_mac('ec:a8:6b:fd:ae:3f')) + + def test_get_vendor_for_mac_returns_error_message_if_unknown_mac(self): + self.assertEqual( + "Unknown Vendor", + get_vendor_for_mac('aa:bb:cc:dd:ee:ff')) diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_network.py maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_network.py --- maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_network.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_network.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for network helpers.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from maasserver.utils import network as network_module -from maasserver.utils.network import make_network -from maastesting.testcase import MAASTestCase -import mock -from netaddr import IPNetwork - - -class TestMakeNetwork(MAASTestCase): - - def test_constructs_IPNetwork(self): - network = make_network('10.22.82.0', 24) - self.assertIsInstance(network, IPNetwork) - self.assertEqual(IPNetwork('10.22.82.0/24'), network) - - def test_passes_args_to_IPNetwork(self): - self.patch(network_module, 'IPNetwork') - make_network('10.1.2.0', 24, foo=9) - self.assertEqual( - [mock.call('10.1.2.0/24', foo=9)], - network_module.IPNetwork.mock_calls) diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_orm.py maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_orm.py --- maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_orm.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_orm.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test ORM utilities.""" @@ -61,20 +61,20 @@ self.assertIsNone(get_one([])) def test_get_one_returns_single_list_item(self): - item = factory.getRandomString() + item = factory.make_string() self.assertEqual(item, get_one([item])) def test_get_one_returns_None_from_any_empty_sequence(self): self.assertIsNone(get_one("no item" for counter in range(0))) def test_get_one_returns_item_from_any_sequence_of_length_one(self): - item = factory.getRandomString() + item = factory.make_string() self.assertEqual(item, get_one(item for counter in range(1))) def test_get_one_does_not_trigger_database_counting(self): # Avoid typical performance pitfall of querying objects *and* # the number of objects. - item = factory.getRandomString() + item = factory.make_string() sequence = FakeQueryResult(type(item), [item]) sequence.__len__ = Mock(side_effect=Exception("len() was called")) self.assertEqual(item, get_one(sequence)) @@ -112,11 +112,11 @@ self.assertIsNone(get_first([])) def test_get_first_returns_first_item(self): - items = [factory.getRandomString() for counter in range(10)] + items = [factory.make_string() for counter in range(10)] self.assertEqual(items[0], get_first(items)) def test_get_first_accepts_any_sequence(self): - item = factory.getRandomString() + item = factory.make_string() self.assertEqual(item, get_first(repeat(item))) def test_get_first_does_not_retrieve_beyond_first_item(self): diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_osystems.py maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_osystems.py --- maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_osystems.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_osystems.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,255 @@ +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for `maasserver.utils.osystems`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from operator import itemgetter +import random + +from maasserver.clusterrpc.testing.osystems import ( + make_rpc_osystem, + make_rpc_release, + ) +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase +from maasserver.utils import osystems as osystems_module +from maasserver.utils.osystems import ( + get_distro_series_initial, + get_release_requires_key, + list_all_releases_requiring_keys, + list_all_usable_osystems, + list_all_usable_releases, + list_commissioning_choices, + list_osystem_choices, + list_release_choices, + ) + + +class TestOsystems(MAASServerTestCase): + + def patch_gen_all_known_operating_systems(self, osystems): + self.patch( + osystems_module, + 'gen_all_known_operating_systems').return_value = osystems + + def test_list_all_usable_osystems(self): + osystems = [make_rpc_osystem() for _ in range(3)] + self.patch_gen_all_known_operating_systems(osystems) + self.assertItemsEqual(osystems, list_all_usable_osystems()) + + def test_list_all_usable_osystems_sorts_title(self): + osystems = [make_rpc_osystem() for _ in range(3)] + self.patch_gen_all_known_operating_systems(osystems) + self.assertEqual( + sorted(osystems, key=itemgetter('title')), + list_all_usable_osystems()) + + def test_list_all_usable_osystems_removes_os_without_releases(self): + osystems = [make_rpc_osystem() for _ in range(3)] + without_releases = make_rpc_osystem(releases=[]) + self.patch_gen_all_known_operating_systems( + osystems + [without_releases]) + self.assertItemsEqual(osystems, list_all_usable_osystems()) + + def test_list_osystem_choices_includes_default(self): + self.assertEqual( + [('', 'Default OS')], + list_osystem_choices([], include_default=True)) + + def test_list_osystem_choices_doesnt_include_default(self): + 
self.assertEqual([], list_osystem_choices([], include_default=False)) + + def test_list_osystem_choices_uses_name_and_title(self): + osystem = make_rpc_osystem() + self.assertEqual( + [(osystem['name'], osystem['title'])], + list_osystem_choices([osystem], include_default=False)) + + +class TestReleases(MAASServerTestCase): + + def make_release_choice(self, osystem, release, include_asterisk=False): + key = '%s/%s' % (osystem['name'], release['name']) + if include_asterisk: + return ('%s*' % key, release['title']) + return (key, release['title']) + + def test_list_all_usable_releases(self): + releases = [make_rpc_release() for _ in range(3)] + osystem = make_rpc_osystem(releases=releases) + self.assertItemsEqual( + releases, list_all_usable_releases([osystem])[osystem['name']]) + + def test_list_all_usable_releases_sorts(self): + releases = [make_rpc_release() for _ in range(3)] + osystem = make_rpc_osystem(releases=releases) + releases = sorted(releases, key=itemgetter('title')) + self.assertEqual( + releases, list_all_usable_releases([osystem])[osystem['name']]) + + def test_list_all_releases_requiring_keys(self): + releases = [ + make_rpc_release(requires_license_key=True) for _ in range(3)] + release_without_license_key = make_rpc_release( + requires_license_key=False) + osystem = make_rpc_osystem( + releases=releases + [release_without_license_key]) + self.assertItemsEqual( + releases, + list_all_releases_requiring_keys([osystem])[osystem['name']]) + + def test_list_all_releases_requiring_keys_sorts(self): + releases = [ + make_rpc_release(requires_license_key=True) for _ in range(3)] + release_without_license_key = make_rpc_release( + requires_license_key=False) + osystem = make_rpc_osystem( + releases=releases + [release_without_license_key]) + releases = sorted(releases, key=itemgetter('title')) + self.assertEqual( + releases, + list_all_releases_requiring_keys([osystem])[osystem['name']]) + + def 
test_get_release_requires_key_returns_asterisk_when_required(self): + release = make_rpc_release(requires_license_key=True) + self.assertEqual('*', get_release_requires_key(release)) + + def test_get_release_requires_key_returns_empty_when_not_required(self): + release = make_rpc_release(requires_license_key=False) + self.assertEqual('', get_release_requires_key(release)) + + def test_list_release_choices_includes_default(self): + self.assertEqual( + [('', 'Default OS Release')], + list_release_choices({}, include_default=True)) + + def test_list_release_choices_doesnt_include_default(self): + self.assertEqual([], list_release_choices({}, include_default=False)) + + def test_list_release_choices(self): + releases = [make_rpc_release() for _ in range(3)] + osystem = make_rpc_osystem(releases=releases) + choices = [ + self.make_release_choice(osystem, release) + for release in releases + ] + self.assertItemsEqual( + choices, + list_release_choices( + list_all_usable_releases([osystem]), + include_default=False)) + + def test_list_release_choices_sorts(self): + releases = [make_rpc_release() for _ in range(3)] + osystem = make_rpc_osystem(releases=releases) + choices = [ + self.make_release_choice(osystem, release) + for release in sorted(releases, key=itemgetter('title')) + ] + self.assertEqual( + choices, + list_release_choices( + list_all_usable_releases([osystem]), + include_default=False)) + + def test_list_release_choices_includes_requires_key_asterisk(self): + releases = [ + make_rpc_release(requires_license_key=True) for _ in range(3)] + osystem = make_rpc_osystem(releases=releases) + choices = [ + self.make_release_choice(osystem, release, include_asterisk=True) + for release in releases + ] + self.assertItemsEqual( + choices, + list_release_choices( + list_all_usable_releases([osystem]), + include_default=False)) + + def test_get_distro_series_initial(self): + releases = [make_rpc_release() for _ in range(3)] + osystem = make_rpc_osystem(releases=releases) + 
release = random.choice(releases) + node = factory.make_Node( + osystem=osystem['name'], distro_series=release['name']) + self.assertEqual( + '%s/%s' % (osystem['name'], release['name']), + get_distro_series_initial( + [osystem], node, with_key_required=False)) + + def test_get_distro_series_initial_without_key_required(self): + releases = [ + make_rpc_release(requires_license_key=True) for _ in range(3)] + osystem = make_rpc_osystem(releases=releases) + release = random.choice(releases) + node = factory.make_Node( + osystem=osystem['name'], distro_series=release['name']) + self.assertEqual( + '%s/%s' % (osystem['name'], release['name']), + get_distro_series_initial( + [osystem], node, with_key_required=False)) + + def test_get_distro_series_initial_with_key_required(self): + releases = [ + make_rpc_release(requires_license_key=True) for _ in range(3)] + osystem = make_rpc_osystem(releases=releases) + release = random.choice(releases) + node = factory.make_Node( + osystem=osystem['name'], distro_series=release['name']) + self.assertEqual( + '%s/%s*' % (osystem['name'], release['name']), + get_distro_series_initial( + [osystem], node, with_key_required=True)) + + def test_get_distro_series_initial_works_around_conflicting_os(self): + # Test for bug 1456892. 
+ releases = [ + make_rpc_release(requires_license_key=True) for _ in range(3)] + osystem = make_rpc_osystem(releases=releases) + release = random.choice(releases) + node = factory.make_Node( + osystem=osystem['name'], distro_series=release['name']) + self.assertEqual( + '%s/%s' % (osystem['name'], release['name']), + get_distro_series_initial( + [], node, with_key_required=True)) + + def test_list_commissioning_choices_returns_empty_list_if_not_ubuntu(self): + osystem = make_rpc_osystem() + self.assertEqual([], list_commissioning_choices([osystem])) + + def test_list_commissioning_choices_returns_commissioning_releases(self): + comm_releases = [ + make_rpc_release(can_commission=True) for _ in range(3)] + no_comm_release = make_rpc_release() + osystem = make_rpc_osystem( + 'ubuntu', releases=comm_releases + [no_comm_release]) + choices = [ + (release['name'], release['title']) + for release in comm_releases + ] + self.assertItemsEqual(choices, list_commissioning_choices([osystem])) + + def test_list_commissioning_choices_returns_sorted(self): + comm_releases = [ + make_rpc_release(can_commission=True) for _ in range(3)] + osystem = make_rpc_osystem( + 'ubuntu', releases=comm_releases) + comm_releases = sorted( + comm_releases, key=itemgetter('title')) + choices = [ + (release['name'], release['title']) + for release in comm_releases + ] + self.assertEqual(choices, list_commissioning_choices([osystem])) diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_utils.py maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_utils.py --- maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_utils.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_utils.py 2015-07-10 01:27:14.000000000 +0000 @@ -19,6 +19,7 @@ from urllib import urlencode from django.conf import settings +from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from django.http import HttpRequest from django.test.client 
import RequestFactory @@ -35,74 +36,39 @@ find_nodegroup, get_db_state, get_local_cluster_UUID, - map_enum, + make_validation_error_message, strip_domain, synchronised, ) from maastesting.testcase import MAASTestCase from mock import sentinel -from netaddr import IPNetwork - - -class TestEnum(MAASTestCase): - - def test_map_enum_includes_all_enum_values(self): - - class Enum: - ONE = 1 - TWO = 2 - - self.assertItemsEqual(['ONE', 'TWO'], map_enum(Enum).keys()) - - def test_map_enum_omits_private_or_special_methods(self): - - class Enum: - def __init__(self): - pass - - def __repr__(self): - return "Enum" - - def _save(self): - pass - - VALUE = 9 - - self.assertItemsEqual(['VALUE'], map_enum(Enum).keys()) - - def test_map_enum_maps_values(self): - - class Enum: - ONE = 1 - THREE = 3 - - self.assertEqual({'ONE': 1, 'THREE': 3}, map_enum(Enum)) +from netaddr import IPAddress class TestAbsoluteReverse(MAASServerTestCase): def test_absolute_reverse_uses_DEFAULT_MAAS_URL_by_default(self): - maas_url = 'http://%s' % factory.getRandomString() + maas_url = 'http://%s' % factory.make_string() self.patch(settings, 'DEFAULT_MAAS_URL', maas_url) absolute_url = absolute_reverse('settings') expected_url = settings.DEFAULT_MAAS_URL + reverse('settings') self.assertEqual(expected_url, absolute_url) def test_absolute_reverse_uses_given_base_url(self): - maas_url = 'http://%s' % factory.getRandomString() + maas_url = 'http://%s' % factory.make_string() absolute_url = absolute_reverse('settings', base_url=maas_url) expected_url = maas_url + reverse('settings') self.assertEqual(expected_url, absolute_url) def test_absolute_reverse_uses_query_string(self): self.patch(settings, 'DEFAULT_MAAS_URL', '') - parameters = {factory.getRandomString(): factory.getRandomString()} + parameters = {factory.make_string(): factory.make_string()} absolute_url = absolute_reverse('settings', query=parameters) expected_url = '%s?%s' % (reverse('settings'), urlencode(parameters)) 
self.assertEqual(expected_url, absolute_url) def test_absolute_reverse_uses_kwargs(self): - node = factory.make_node() + node = factory.make_Node() self.patch(settings, 'DEFAULT_MAAS_URL', '') absolute_url = absolute_reverse( 'node-view', kwargs={'system_id': node.system_id}) @@ -110,7 +76,7 @@ self.assertEqual(expected_url, absolute_url) def test_absolute_reverse_uses_args(self): - node = factory.make_node() + node = factory.make_Node() self.patch(settings, 'DEFAULT_MAAS_URL', '') absolute_url = absolute_reverse('node-view', args=[node.system_id]) expected_url = reverse('node-view', args=[node.system_id]) @@ -121,9 +87,9 @@ """Testing for the method `get_db_state`.""" def test_get_db_state_returns_db_state(self): - status = factory.getRandomChoice(NODE_STATUS_CHOICES) - node = factory.make_node(status=status) - another_status = factory.getRandomChoice( + status = factory.pick_choice(NODE_STATUS_CHOICES) + node = factory.make_Node(status=status) + another_status = factory.pick_choice( NODE_STATUS_CHOICES, but_not=[status]) node.status = another_status self.assertEqual(status, get_db_state(node, 'status')) @@ -210,28 +176,47 @@ self.assertIsNone(get_local_cluster_UUID()) def test_get_local_cluster_UUID_returns_cluster_UUID(self): - uuid = factory.getRandomUUID() + uuid = factory.make_UUID() file_name = self.make_file(contents='CLUSTER_UUID="%s"' % uuid) self.patch(settings, 'LOCAL_CLUSTER_CONFIG', file_name) self.assertEqual(uuid, get_local_cluster_UUID()) -def get_request(origin_ip): - return RequestFactory().post('/', REMOTE_ADDR=origin_ip) +def make_request(origin_ip): + """Return a fake HTTP request with the given remote address.""" + return RequestFactory().post('/', REMOTE_ADDR=unicode(origin_ip)) class TestFindNodegroup(MAASServerTestCase): + scenarios = [ + ('ipv4', {'network_factory': factory.make_ipv4_network}), + ('ipv6', {'network_factory': factory.make_ipv6_network}), + ] + + def make_cluster_interface(self, network, management=None): + """Create a 
cluster interface. + + The interface is managed by default. + """ + if management is None: + management = factory.pick_enum( + NODEGROUPINTERFACE_MANAGEMENT, + but_not=[NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED]) + cluster = factory.make_NodeGroup() + return factory.make_NodeGroupInterface( + cluster, network=network, management=management) + def test_find_nodegroup_looks_up_nodegroup_by_controller_ip(self): - nodegroup = factory.make_node_group() - [interface] = nodegroup.get_managed_interfaces() + nodegroup = factory.make_NodeGroup() + interface = factory.make_NodeGroupInterface(nodegroup) self.assertEqual( nodegroup, - find_nodegroup(get_request(interface.ip))) + find_nodegroup(make_request(interface.ip))) def test_find_nodegroup_returns_None_if_not_found(self): - self.assertIsNone( - find_nodegroup(get_request(factory.getRandomIPAddress()))) + requesting_ip = factory.pick_ip_in_network(self.network_factory()) + self.assertIsNone(find_nodegroup(make_request(requesting_ip))) # # Finding a node's nodegroup (aka cluster controller) in a nutshell: @@ -243,58 +228,96 @@ # def test_1_managed_interface(self): - nodegroup = factory.make_node_group( - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, - network=IPNetwork("192.168.41.0/24")) + network = self.network_factory() + interface = self.make_cluster_interface(network) self.assertEqual( - nodegroup, find_nodegroup(get_request('192.168.41.199'))) + interface.nodegroup, + find_nodegroup( + make_request(factory.pick_ip_in_network(network)))) def test_1_managed_interface_and_1_unmanaged(self): # The managed nodegroup is chosen in preference to the unmanaged # nodegroup. 
- nodegroup = factory.make_node_group( - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, - network=IPNetwork("192.168.41.0/24")) - factory.make_node_group( - management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED, - network=IPNetwork("192.168.41.0/16")) - self.assertEqual( - nodegroup, find_nodegroup(get_request('192.168.41.199'))) + network = self.network_factory() + interface = self.make_cluster_interface(network) + self.make_cluster_interface( + network, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) + self.assertEqual( + interface.nodegroup, + find_nodegroup( + make_request(factory.pick_ip_in_network(network)))) def test_more_than_1_managed_interface(self): - factory.make_node_group( - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, - network=IPNetwork("192.168.41.0/16")) - factory.make_node_group( - management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, - network=IPNetwork("192.168.41.0/24")) + network = self.network_factory() + requesting_ip = factory.pick_ip_in_network(network) + self.make_cluster_interface(network=network) + self.make_cluster_interface(network=network) exception = self.assertRaises( NodeGroupMisconfiguration, - find_nodegroup, get_request('192.168.41.199')) + find_nodegroup, make_request(requesting_ip)) self.assertEqual( (httplib.CONFLICT, "Multiple clusters on the same network; only " "one cluster may manage the network of which " - "192.168.41.199 is a member."), + "%s is a member." 
% requesting_ip), (exception.api_error, "%s" % exception)) def test_1_unmanaged_interface(self): - nodegroup = factory.make_node_group( - management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED, - network=IPNetwork("192.168.41.0/24")) + network = self.network_factory() + interface = self.make_cluster_interface(network) self.assertEqual( - nodegroup, find_nodegroup(get_request('192.168.41.199'))) + interface.nodegroup, + find_nodegroup( + make_request(factory.pick_ip_in_network(network)))) def test_more_than_1_unmanaged_interface(self): - nodegroup1 = factory.make_node_group( - management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED, - network=IPNetwork("192.168.41.0/16")) - factory.make_node_group( - management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED, - network=IPNetwork("192.168.41.0/24")) + network = self.network_factory() + interfaces = [ + self.make_cluster_interface( + network, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) + for _ in range(2) + ] self.assertEqual( - nodegroup1, find_nodegroup(get_request('192.168.41.199'))) + interfaces[0].nodegroup, + find_nodegroup( + make_request(factory.pick_ip_in_network(network)))) + + def test_handles_mixed_IPv4_and_IPv6(self): + matching_network = self.network_factory() + requesting_ip = factory.pick_ip_in_network(matching_network) + self.make_cluster_interface(factory.make_ipv4_network()) + self.make_cluster_interface(factory.make_ipv6_network()) + matching_interface = self.make_cluster_interface(matching_network) + self.assertEqual( + matching_interface.nodegroup, + find_nodegroup(make_request(requesting_ip))) + + def test_includes_lower_bound(self): + network = self.network_factory() + interface = self.make_cluster_interface(network) + self.assertEqual( + interface.nodegroup, + find_nodegroup(make_request(IPAddress(network.first)))) + + def test_includes_upper_bound(self): + network = self.network_factory() + interface = self.make_cluster_interface(network) + self.assertEqual( + interface.nodegroup, + 
find_nodegroup(make_request(IPAddress(network.last)))) + + def test_excludes_lower_bound_predecessor(self): + network = self.network_factory() + self.make_cluster_interface(network) + self.assertIsNone( + find_nodegroup(make_request(IPAddress(network.first - 1)))) + + def test_excludes_upper_bound_successor(self): + network = self.network_factory() + self.make_cluster_interface(network) + self.assertIsNone( + find_nodegroup(make_request(IPAddress(network.last + 1)))) class TestSynchronised(MAASTestCase): @@ -310,3 +333,18 @@ self.assertFalse(lock.locked()) self.assertEqual(sentinel.called, example_synchronised_function()) self.assertFalse(lock.locked()) + + +class TestMakeValidationErrorMessage(MAASTestCase): + + def test__formats_message_with_all_errors(self): + error = ValidationError({ + "foo": [ValidationError("bar")], + "alice": [ValidationError("bob")], + "__all__": ["all is lost"], + }) + self.assertEqual( + "* all is lost\n" + "* alice: bob\n" + "* foo: bar", + make_validation_error_message(error)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_version.py maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_version.py --- maas-1.5.4+bzr2294/src/maasserver/utils/tests/test_version.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/tests/test_version.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,217 @@ +# Copyright 2015 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Test version utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import random + +from bzrlib.errors import NotBranchError +from maasserver.utils import version +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import MAASTestCase +from mock import ( + MagicMock, + sentinel, + ) +from testtools.matchers import Is + + +class TestGetVersionFromAPT(MAASTestCase): + + def test__creates_cache_with_None_progress(self): + mock_Cache = self.patch(version.apt_pkg, "Cache") + version.get_version_from_apt(version.REGION_PACKAGE_NAME) + self.assertThat(mock_Cache, MockCalledOnceWith(None)) + + def test__returns_empty_string_if_package_not_in_cache(self): + self.patch(version.apt_pkg, "Cache") + self.assertEquals( + "", + version.get_version_from_apt(version.REGION_PACKAGE_NAME)) + + def test__returns_empty_string_if_not_current_ver_from_package(self): + package = MagicMock() + package.current_ver = None + mock_cache = { + version.REGION_PACKAGE_NAME: package, + } + self.patch(version.apt_pkg, "Cache").return_value = mock_cache + self.assertEquals( + "", + version.get_version_from_apt(version.REGION_PACKAGE_NAME)) + + def test__returns_ver_str_from_package(self): + package = MagicMock() + package.current_ver.ver_str = sentinel.ver_str + mock_cache = { + version.REGION_PACKAGE_NAME: package, + } + self.patch(version.apt_pkg, "Cache").return_value = mock_cache + self.assertIs( + sentinel.ver_str, + version.get_version_from_apt(version.REGION_PACKAGE_NAME)) + + +class TestGetMAASBranch(MAASTestCase): + + def test__returns_None_if_Branch_is_None(self): + self.patch(version, "Branch", None) + self.assertIsNone(version.get_maas_branch()) + + def test__calls_Branch_open_with_current_dir(self): + mock_open = self.patch(version.Branch, "open") + mock_open.return_value = sentinel.branch + self.expectThat(version.get_maas_branch(), 
Is(sentinel.branch)) + self.expectThat(mock_open, MockCalledOnceWith(".")) + + def test__returns_None_on_NotBranchError(self): + mock_open = self.patch(version.Branch, "open") + mock_open.side_effect = NotBranchError("") + self.assertIsNone(version.get_maas_branch()) + + +class TestExtractVersionSubversion(MAASTestCase): + + scenarios = [ + ("with ~", { + "version": "1.8.0~alpha4+bzr356-0ubuntu1", + "output": ("1.8.0", "alpha4+bzr356"), + }), + ("without ~", { + "version": "1.8.0+bzr356-0ubuntu1", + "output": ("1.8.0", "+bzr356"), + }), + ("without ~ or +", { + "version": "1.8.0-0ubuntu1", + "output": ("1.8.0", ""), + }), + ] + + def test__returns_version_subversion(self): + self.assertEquals( + self.output, version.extract_version_subversion(self.version)) + + +class TestVersionTestCase(MAASTestCase): + """MAASTestCase that resets the cache used by utility methods.""" + + def setUp(self): + super(TestVersionTestCase, self).setUp() + self.patch(version, "_cache", {}) + + +class TestGetMAASPackageVersion(TestVersionTestCase): + + def test__calls_get_version_from_apt(self): + mock_apt = self.patch(version, "get_version_from_apt") + mock_apt.return_value = sentinel.version + self.expectThat( + version.get_maas_package_version(), Is(sentinel.version)) + self.expectThat( + mock_apt, MockCalledOnceWith(version.REGION_PACKAGE_NAME)) + + +class TestGetMAASVersionSubversion(TestVersionTestCase): + + def test__returns_package_version(self): + mock_apt = self.patch(version, "get_version_from_apt") + mock_apt.return_value = "1.8.0~alpha4+bzr356-0ubuntu1" + self.assertEquals( + ("1.8.0", "alpha4+bzr356"), version.get_maas_version_subversion()) + + def test__returns_unknown_if_version_is_empty_and_not_bzr_branch(self): + mock_version = self.patch(version, "get_version_from_apt") + mock_version.return_value = "" + mock_branch = self.patch(version, "get_maas_branch") + mock_branch.return_value = None + self.assertEquals( + ("unknown", ""), version.get_maas_version_subversion()) + 
+ def test__returns_from_source_and_revno_from_branch(self): + mock_version = self.patch(version, "get_version_from_apt") + mock_version.return_value = "" + revno = random.randint(1, 5000) + mock_branch = self.patch(version, "get_maas_branch") + mock_branch.return_value.revno.return_value = revno + self.assertEquals( + ("from source (+bzr%s)" % revno, ""), + version.get_maas_version_subversion()) + + +class TestGetMAASVersionUI(TestVersionTestCase): + + def test__returns_package_version(self): + mock_apt = self.patch(version, "get_version_from_apt") + mock_apt.return_value = "1.8.0~alpha4+bzr356-0ubuntu1" + self.assertEquals( + "1.8.0 (alpha4+bzr356)", version.get_maas_version_ui()) + + def test__returns_unknown_if_version_is_empty_and_not_bzr_branch(self): + mock_version = self.patch(version, "get_version_from_apt") + mock_version.return_value = "" + mock_branch = self.patch(version, "get_maas_branch") + mock_branch.return_value = None + self.assertEquals("unknown", version.get_maas_version_ui()) + + def test__returns_from_source_and_revno_from_branch(self): + mock_version = self.patch(version, "get_version_from_apt") + mock_version.return_value = "" + revno = random.randint(1, 5000) + mock_branch = self.patch(version, "get_maas_branch") + mock_branch.return_value.revno.return_value = revno + self.assertEquals( + "from source (+bzr%s)" % revno, version.get_maas_version_ui()) + + +class TestGetMAASDocVersion(TestVersionTestCase): + + def test__returns_doc_version_with_greater_than_1_decimals(self): + mock_apt = self.patch(version, "get_version_from_apt") + mock_apt.return_value = "1.8.0~alpha4+bzr356-0ubuntu1" + self.assertEquals("docs1.8", version.get_maas_doc_version()) + + def test__returns_doc_version_with_equal_to_1_decimals(self): + mock_apt = self.patch(version, "get_version_from_apt") + mock_apt.return_value = "1.8~alpha4+bzr356-0ubuntu1" + self.assertEquals("docs1.8", version.get_maas_doc_version()) + + def test__returns_just_doc_if_version_is_empty(self): 
+ mock_apt = self.patch(version, "get_version_from_apt") + mock_apt.return_value = "" + self.assertEquals("docs", version.get_maas_doc_version()) + + +class TestVersionMethodsCached(TestVersionTestCase): + + scenarios = [ + ("get_maas_package_version", dict(method="get_maas_package_version")), + ("get_maas_version_subversion", dict( + method="get_maas_version_subversion")), + ("get_maas_version_ui", dict(method="get_maas_version_ui")), + ("get_maas_doc_version", dict(method="get_maas_doc_version")), + ] + + def test_method_is_cached(self): + mock_apt = self.patch(version, "get_version_from_apt") + mock_apt.return_value = "1.8.0~alpha4+bzr356-0ubuntu1" + cached_method = getattr(version, self.method) + first_return_value = cached_method() + second_return_value = cached_method() + # The return value is not empty (full unit tests have been performed + # earlier). + self.assertNotIn(first_return_value, [b'', u'', None]) + self.assertEqual(first_return_value, second_return_value) + # Apt has only been called once. + self.expectThat( + mock_apt, MockCalledOnceWith(version.REGION_PACKAGE_NAME)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/utils/version.py maas-1.7.6+bzr3376/src/maasserver/utils/version.py --- maas-1.5.4+bzr2294/src/maasserver/utils/version.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/utils/version.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,131 @@ +# Copyright 2015 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Version utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "get_maas_doc_version", + "get_maas_version_subversion", + "get_maas_version_ui", + ] + +import apt_pkg + + +try: + from bzrlib.branch import Branch + from bzrlib.errors import NotBranchError +except ImportError: + Branch = None + +# Initialize apt_pkg. 
+apt_pkg.init() + +# Name of maas package to get version from. +REGION_PACKAGE_NAME = "maas-region-controller-min" + + +def get_version_from_apt(package): + """Return the version output from `apt_pkg.Cache` for the given package.""" + cache = apt_pkg.Cache(None) + version = None + if package in cache: + apt_package = cache[package] + version = apt_package.current_ver + + if version is not None: + return version.ver_str + else: + return "" + + +def extract_version_subversion(version): + """Return a tuple (version, subversion) from the given apt version.""" + if "~" in version: + main_version, extra = version.split("~", 1) + return main_version, extra.split("-", 1)[0] + elif "+" in version: + main_version, extra = version.split("+", 1) + return main_version, "+" + extra.split("-", 1)[0] + else: + return version.split("-", 1)[0], '' + + +def get_maas_branch(): + """Return the `bzrlib.branch.Branch` for this running MAAS.""" + if Branch is None: + return None + try: + return Branch.open(".") + except NotBranchError: + return None + + +_cache = {} + + +# A very simply memoize function: when we switch to Django 1.7 we should use +# Django's lru_cache method. +def simple_cache(fun): + def wrapped(*args, **kwargs): + key = hash(repr(fun) + repr(args) + repr(kwargs)) + if not key in _cache: + _cache[key] = fun(*args, **kwargs) + return _cache[key] + + wrapped.__doc__ = "%s %s" % (fun.__doc__, "(cached)") + return wrapped + + +@simple_cache +def get_maas_package_version(): + """Return the apt version for the main MAAS package.""" + return get_version_from_apt(REGION_PACKAGE_NAME) + + +@simple_cache +def get_maas_version_subversion(): + """Return a tuple with the MAAS version and the MAAS subversion.""" + apt_version = get_maas_package_version() + if apt_version: + return extract_version_subversion(apt_version) + else: + # Get the branch information + branch = get_maas_branch() + if branch is None: + # Not installed not in branch, then no way to identify. 
This should + # not happen, but just in case. + return "unknown", '' + else: + return "from source (+bzr%s)" % branch.revno(), '' + + +@simple_cache +def get_maas_version_ui(): + """Return the version string for the running MAAS region. + + The returned string is suitable to display in the UI. + """ + version, subversion = get_maas_version_subversion() + return "%s (%s)" % (version, subversion) if subversion else version + + +@simple_cache +def get_maas_doc_version(): + """Return the doc version for the running MAAS region.""" + doc_prefix = 'docs' + apt_version = get_maas_package_version() + if apt_version: + version, _ = extract_version_subversion(apt_version) + return doc_prefix + '.'.join(version.split('.')[:2]) + else: + return doc_prefix diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/clusters.py maas-1.7.6+bzr3376/src/maasserver/views/clusters.py --- maas-1.5.4+bzr2294/src/maasserver/views/clusters.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/clusters.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Cluster views.""" @@ -13,7 +13,6 @@ __metaclass__ = type __all__ = [ - "BootImagesListView", "ClusterDelete", "ClusterEdit", "ClusterInterfaceCreate", @@ -47,6 +46,7 @@ NodeGroupInterfaceForm, ) from maasserver.models import ( + BootResource, NodeGroup, NodeGroupInterface, ) @@ -108,12 +108,13 @@ context['current_count'] = NodeGroup.objects.filter( status=self.status).count() context['title'] = self.make_cluster_listing_title() - # Display warnings for clusters that have no images, but only for the - # display of 'accepted' clusters. 
- context['warn_no_images'] = self.status == NODEGROUP_STATUS.ACCEPTED + # Display warnings (no images, cluster not connected) for clusters, + # but only for the display of 'accepted' clusters. + context['display_warnings'] = self.status == NODEGROUP_STATUS.ACCEPTED context['status'] = self.status context['statuses'] = NODEGROUP_STATUS context['status_name'] = NODEGROUP_STATUS_CHOICES[self.status][1] + context['region_has_images'] = BootResource.objects.exists() return context def post(self, request, *args, **kwargs): @@ -130,16 +131,6 @@ messages.info(request, "Rejected %d cluster(s)." % number) return HttpResponseRedirect(reverse('cluster-list')) - elif 'import_all_boot_images' in request.POST: - # Import PXE files for all the accepted clusters. - NodeGroup.objects.import_boot_images_accepted_clusters() - message = ( - "Import of boot images started on all cluster controllers. " - "Importing the boot images can take a long time depending on " - "the available bandwidth.") - messages.info(request, message) - return HttpResponseRedirect(reverse('cluster-list')) - else: # Unknown action: redirect to the cluster listing page (this # shouldn't happen). @@ -152,10 +143,19 @@ form_class = NodeGroupEdit context_object_name = 'cluster' + def get_form_kwargs(self): + kwargs = super(ClusterEdit, self).get_form_kwargs() + # The cluster form has a boolean checkbox. For those we need to know + # whether a submission came in from the UI (where omitting the field + # means "set to False") or from the API (where it means "leave + # unchanged"). 
+ kwargs['ui_submission'] = True + return kwargs + def get_context_data(self, **kwargs): context = super(ClusterEdit, self).get_context_data(**kwargs) context['interfaces'] = ( - self.object.nodegroupinterface_set.all().order_by('interface')) + self.object.nodegroupinterface_set.all().order_by('name')) return context def get_success_url(self): @@ -195,9 +195,9 @@ def get_object(self): uuid = self.kwargs.get('uuid', None) - interface = self.kwargs.get('interface', None) + name = self.kwargs.get('name', None) return get_object_or_404( - NodeGroupInterface, nodegroup__uuid=uuid, interface=interface) + NodeGroupInterface, nodegroup__uuid=uuid, name=name) def get_next_url(self): uuid = self.kwargs.get('uuid', None) @@ -206,7 +206,7 @@ def delete(self, request, *args, **kwargs): interface = self.get_object() interface.delete() - messages.info(request, "Interface %s deleted." % interface.interface) + messages.info(request, "Interface %s deleted." % interface.name) return HttpResponseRedirect(self.get_next_url()) @@ -225,9 +225,9 @@ def get_object(self): uuid = self.kwargs.get('uuid', None) - interface = self.kwargs.get('interface', None) + name = self.kwargs.get('name', None) return get_object_or_404( - NodeGroupInterface, nodegroup__uuid=uuid, interface=interface) + NodeGroupInterface, nodegroup__uuid=uuid, name=name) class ClusterInterfaceCreate(CreateView): @@ -259,26 +259,3 @@ ClusterInterfaceCreate, self).get_context_data(**kwargs) context['nodegroup'] = self.get_nodegroup() return context - - -class BootImagesListView(PaginatedListView): - - template_name = 'maasserver/bootimage-list.html' - context_object_name = 'bootimage_list' - - def get_nodegroup(self): - nodegroup_uuid = self.kwargs.get('uuid', None) - return get_object_or_404(NodeGroup, uuid=nodegroup_uuid) - - def get_context_data(self, **kwargs): - context = super( - BootImagesListView, self).get_context_data(**kwargs) - context['nodegroup'] = self.get_nodegroup() - return context - - def 
get_queryset(self): - nodegroup = self.get_nodegroup() - # A sorted bootimages list. - return nodegroup.bootimage_set.all().order_by( - '-release', 'architecture', 'subarchitecture', 'purpose', - 'label') diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/images.py maas-1.7.6+bzr3376/src/maasserver/views/images.py --- maas-1.5.4+bzr2294/src/maasserver/views/images.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/images.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,661 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Image views.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "ImagesView", + "ImageDeleteView", + ] + +from collections import defaultdict +import json + +from distro_info import UbuntuDistroInfo +from django.core.exceptions import PermissionDenied +from django.core.urlresolvers import reverse +from django.http import ( + HttpResponse, + HttpResponseForbidden, + HttpResponseRedirect, + ) +from django.shortcuts import get_object_or_404 +from django.views.generic.base import TemplateView +from django.views.generic.edit import ( + FormMixin, + ProcessFormView, + ) +from maasserver.bootresources import ( + import_resources, + is_import_resources_running, + ) +from maasserver.bootsources import get_os_info_from_boot_sources +from maasserver.clusterrpc.boot_images import ( + get_available_boot_images, + is_import_boot_images_running, + ) +from maasserver.clusterrpc.osystems import get_os_release_title +from maasserver.enum import ( + BOOT_RESOURCE_TYPE, + NODE_STATUS, + ) +from maasserver.models import ( + BootResource, + BootSourceCache, + BootSourceSelection, + Config, + LargeFile, + Node, + ) +from maasserver.views import HelpfulDeleteView +from requests import ConnectionError + + +def format_size(size): + """Formats the size 
into human readable.""" + for x in ['bytes', 'KB', 'MB', 'GB']: + if size < 1024.0: + return "%3.1f %s" % (size, x) + size /= 1024.0 + return "%3.1f %s" % (size, ' TB') + + +def get_distro_series_info_row(series): + """Returns the distro series row information from python-distro-info. + """ + info = UbuntuDistroInfo() + for row in info._avail(info._date): + if row['series'] == series: + return row + return None + + +def format_ubuntu_distro_series(series): + """Formats the Ubuntu distro series into a version name.""" + row = get_distro_series_info_row(series) + if row is None: + return series + return row['version'] + + +class ImagesView(TemplateView, FormMixin, ProcessFormView): + template_name = 'maasserver/images.html' + context_object_name = "images" + status = None + + def __init__(self, *args, **kwargs): + super(ImagesView, self).__init__(*args, **kwargs) + + # Load the Ubuntu info from the `BootSource`'s. This is done in + # __init__ so that it is not done, more that once. + try: + sources, releases, arches = get_os_info_from_boot_sources('ubuntu') + self.connection_error = False + self.ubuntu_sources = sources + self.ubuntu_releases = releases + self.ubuntu_arches = arches + except ConnectionError: + self.connection_error = True + self.ubuntu_sources = [] + self.ubuntu_releases = set() + self.ubuntu_arches = set() + + def get(self, request, *args, **kwargs): + # Load all the nodes, so its not done on every call + # to the method get_number_of_nodes_deployed_for. + self.nodes = Node.objects.filter( + status__in=[NODE_STATUS.DEPLOYED, NODE_STATUS.DEPLOYING]).only( + 'osystem', 'distro_series') + self.default_osystem = Config.objects.get_config( + 'default_osystem') + self.default_distro_series = Config.objects.get_config( + 'default_distro_series') + + # Load list of boot resources that currently exist on all clusters. 
+ cluster_images = get_available_boot_images() + self.clusters_syncing = is_import_boot_images_running() + self.cluster_resources = ( + BootResource.objects.get_resources_matching_boot_images( + cluster_images)) + + # If the request is ajax, then return the list of resources as json. + if request.is_ajax(): + return self.ajax(request, *args, **kwargs) + return super(ImagesView, self).get(request, *args, **kwargs) + + def get_context_data(self, **kwargs): + """Return context data that is passed into the template.""" + context = super(ImagesView, self).get_context_data(**kwargs) + context['region_import_running'] = is_import_resources_running() + context['cluster_import_running'] = self.clusters_syncing + context['connection_error'] = self.connection_error + context['ubuntu_streams_count'] = len(self.ubuntu_sources) + context['ubuntu_releases'] = self.format_ubuntu_releases() + context['ubuntu_arches'] = self.format_ubuntu_arches() + context['other_resources'] = self.get_other_resources() + context['generated_resources'] = self.get_generated_resources() + context['uploaded_resources'] = self.get_uploaded_resources() + return context + + def post(self, request, *args, **kwargs): + """Handle a POST request.""" + # Only administrators can change options on this page. + if not self.request.user.is_superuser: + return HttpResponseForbidden() + if 'ubuntu_images' in request.POST: + releases = request.POST.getlist('release') + arches = request.POST.getlist('arch') + self.update_source_selection( + self.ubuntu_sources[0], 'ubuntu', releases, arches) + return HttpResponseRedirect(reverse('images')) + elif 'other_images' in request.POST: + images = request.POST.getlist('image') + self.update_other_images_source_selection(images) + return HttpResponseRedirect(reverse('images')) + else: + # Unknown action: redirect to the images page (this + # shouldn't happen). 
+ return HttpResponseRedirect(reverse('images')) + + def get_ubuntu_release_selections(self): + """Return list of all selected releases for Ubuntu. If first item in + tuple is true, then all releases are selected by wildcard.""" + all_selected = False + releases = set() + for selection in BootSourceSelection.objects.all(): + if selection.os == "ubuntu": + if selection.release == "*": + all_selected = True + else: + releases.add(selection.release) + return all_selected, releases + + def format_ubuntu_releases(self): + """Return formatted Ubuntu release selections for the template.""" + releases = [] + all_releases, selected_releases = self.get_ubuntu_release_selections() + for release in sorted(list(self.ubuntu_releases), reverse=True): + if all_releases or release in selected_releases: + checked = True + else: + checked = False + releases.append({ + 'name': release, + 'title': format_ubuntu_distro_series(release), + 'checked': checked, + }) + return releases + + def get_ubuntu_arch_selections(self): + """Return list of all selected arches for Ubuntu. 
If first item in + tuple is true, then all arches are selected by wildcard.""" + all_selected = False + arches = set() + for selection in BootSourceSelection.objects.all(): + if selection.os == "ubuntu": + for arch in selection.arches: + if arch == "*": + all_selected = True + else: + arches.add(arch) + return all_selected, arches + + def format_ubuntu_arches(self): + """Return formatted Ubuntu architecture selections for the template.""" + arches = [] + all_arches, selected_arches = self.get_ubuntu_arch_selections() + for arch in sorted(list(self.ubuntu_arches)): + if all_arches or arch in selected_arches: + checked = True + else: + checked = False + arches.append({ + 'name': arch, + 'title': arch, + 'checked': checked, + }) + return arches + + def get_resource_title(self, resource): + """Return the title for the resource based on the type and name.""" + rtypes_with_split_names = [ + BOOT_RESOURCE_TYPE.SYNCED, + BOOT_RESOURCE_TYPE.GENERATED, + ] + if resource.rtype in rtypes_with_split_names: + os, series = resource.name.split('/') + if resource.name.startswith('ubuntu/'): + return format_ubuntu_distro_series(series) + else: + title = get_os_release_title(os, series) + if title is None: + return resource.name + else: + return title + else: + if 'title' in resource.extra and len(resource.extra['title']) > 0: + return resource.extra['title'] + else: + return resource.name + + def add_resource_template_attributes(self, resource): + """Adds helper attributes to the resource.""" + resource.title = self.get_resource_title(resource) + resource.arch, resource.subarch = resource.split_arch() + resource.number_of_nodes = self.get_number_of_nodes_deployed_for( + resource) + resource_set = resource.get_latest_set() + if resource_set is None: + resource.size = format_size(0) + resource.last_update = resource.updated + resource.complete = False + resource.status = "Queued for download" + resource.downloading = False + else: + resource.size = format_size(resource_set.total_size) 
+ resource.last_update = resource_set.updated + resource.complete = resource_set.complete + if not resource.complete: + progress = resource_set.progress + if progress > 0: + resource.status = "Downloading %3.0f%%" % progress + resource.downloading = True + else: + resource.status = "Queued for download" + resource.downloading = False + else: + # See if the resource also exists on all the clusters. + if resource in self.cluster_resources: + resource.status = "Complete" + resource.downloading = False + else: + resource.complete = False + if self.clusters_syncing: + resource.status = "Syncing to clusters" + resource.downloading = True + else: + resource.status = "Waiting for clusters to sync" + resource.downloading = False + + def node_has_architecture_for_resource(self, node, resource): + """Return True if node is the same architecture as resource.""" + arch, _ = resource.split_arch() + node_arch, node_subarch = node.split_arch() + return arch == node_arch and resource.supports_subarch(node_subarch) + + def get_number_of_nodes_deployed_for(self, resource): + """Return number of nodes that are deploying the given + os, series, and architecture.""" + if resource.rtype == BOOT_RESOURCE_TYPE.UPLOADED: + osystem = 'custom' + distro_series = resource.name + else: + osystem, distro_series = resource.name.split('/') + + # Count the number of nodes with same os/release and architecture. + count = 0 + for node in self.nodes.filter( + osystem=osystem, distro_series=distro_series): + if self.node_has_architecture_for_resource(node, resource): + count += 1 + + # Any node that is deployed without osystem and distro_series, + # will be using the defaults. 
+ if (self.default_osystem == osystem and + self.default_distro_series == distro_series): + for node in self.nodes.filter( + osystem="", distro_series=""): + if self.node_has_architecture_for_resource(node, resource): + count += 1 + return count + + def update_source_selection(self, boot_source, os, releases, arches): + # Remove all selections, that are not of release. + BootSourceSelection.objects.filter( + boot_source=boot_source, os=os).exclude( + release__in=releases).delete() + + if len(releases) > 0: + # Create or update the selections. + for release in releases: + selection, _ = BootSourceSelection.objects.get_or_create( + boot_source=boot_source, os=os, release=release) + selection.arches = arches + selection.subarches = ["*"] + selection.labels = ["*"] + selection.save() + else: + # Create a selection that will cause nothing to be downloaded, + # since no releases are selected. + selection, _ = BootSourceSelection.objects.get_or_create( + boot_source=boot_source, os=os, release="") + selection.arches = arches + selection.subarches = ["*"] + selection.labels = ["*"] + selection.save() + + # Start the import process, now that the selections have changed. 
+ import_resources() + + def get_other_synced_resources(self): + """Return all synced resources that are not Ubuntu.""" + resources = list(BootResource.objects.filter( + rtype=BOOT_RESOURCE_TYPE.SYNCED).exclude( + name__startswith='ubuntu/').order_by('-name', 'architecture')) + for resource in resources: + self.add_resource_template_attributes(resource) + return resources + + def check_if_image_matches_resource(self, resource, image): + """Return True if the resource matches the image.""" + os, series = resource.name.split('/') + arch, subarch = resource.split_arch() + if os != image.os or series != image.release or arch != image.arch: + return False + if not resource.supports_subarch(subarch): + return False + return True + + def get_matching_resource_for_image(self, resources, image): + """Return True if the image matches one of the resources.""" + for resource in resources: + if self.check_if_image_matches_resource(resource, image): + return resource + return None + + def get_other_resources(self): + """Return all other resources if they are synced or not.""" + # Get the resource that already exist in the + resources = self.get_other_synced_resources() + images = list(BootSourceCache.objects.exclude(os='ubuntu')) + for image in images: + resource = self.get_matching_resource_for_image(resources, image) + if resource is None: + image.exists = False + image.complete = False + image.size = '-' + image.last_update = 'not synced' + image.status = "" + image.downloading = False + image.number_of_nodes = '-' + else: + self.add_resource_template_attributes(resource) + image.exists = True + image.complete = resource.complete + image.size = resource.size + image.last_update = resource.last_update + image.status = resource.status + image.downloading = resource.downloading + image.number_of_nodes = ( + self.get_number_of_nodes_deployed_for(resource)) + image.title = get_os_release_title(image.os, image.release) + if image.title is None: + image.title = '%s/%s' % (image.os, 
image.release) + + # Only superusers can change selections about other images, so we only + # show the images that already exist for standard users. + if not self.request.user.is_superuser: + images = [ + image + for image in images + if image.exists + ] + return images + + def update_other_images_source_selection(self, images): + """Update `BootSourceSelection`'s to only include the selected + images.""" + # Remove all selections that are not Ubuntu. + BootSourceSelection.objects.exclude(os='ubuntu').delete() + + # Break down the images into os/release with multiple arches. + selections = defaultdict(list) + for image in images: + os, arch, _, release = image.split('/', 4) + name = '%s/%s' % (os, release) + selections[name].append(arch) + + # Create each selection for the source. + for name, arches in selections.items(): + os, release = name.split('/') + cache = BootSourceCache.objects.filter( + os=os, arch=arch, release=release).first() + if cache is None: + # It is possible the cache changed while waiting for the user + # to perform an action. Ignore the selection as its no longer + # available. + continue + # Create the selection for the source. + BootSourceSelection.objects.create( + boot_source=cache.boot_source, + os=os, release=release, + arches=arches, subarches=["*"], labels=["*"]) + + # Start the import process, now that the selections have changed. 
+ import_resources() + + def get_generated_resources(self): + """Return all generated resources.""" + resources = list(BootResource.objects.filter( + rtype=BOOT_RESOURCE_TYPE.GENERATED).order_by( + '-name', 'architecture')) + for resource in resources: + self.add_resource_template_attributes(resource) + return resources + + def get_uploaded_resources(self): + """Return all uploaded resources, for usage in the template.""" + resources = list(BootResource.objects.filter( + rtype=BOOT_RESOURCE_TYPE.UPLOADED).order_by( + 'name', 'architecture')) + for resource in resources: + self.add_resource_template_attributes(resource) + return resources + + def pick_latest_datetime(self, time, other_time): + """Return the datetime that is the latest.""" + if time is None: + return other_time + return max([time, other_time]) + + def calculate_unique_size_for_resources(self, resources): + """Return size of all unique largefiles for the given resources.""" + shas = set() + size = 0 + for resource in resources: + resource_set = resource.get_latest_set() + if resource_set is None: + continue + for rfile in resource_set.files.all(): + try: + largefile = rfile.largefile + except LargeFile.DoesNotExist: + continue + if largefile.sha256 not in shas: + size += largefile.total_size + shas.add(largefile.sha256) + return size + + def are_all_resources_complete(self, resources): + """Return the complete status for all the given resources.""" + for resource in resources: + resource_set = resource.get_latest_set() + if resource_set is None: + return False + if not resource_set.complete: + return False + return True + + def get_last_update_for_resources(self, resources): + """Return the latest updated time for all resources.""" + last_update = None + for resource in resources: + last_update = self.pick_latest_datetime( + last_update, resource.updated) + resource_set = resource.get_latest_set() + if resource_set is not None: + last_update = self.pick_latest_datetime( + last_update, 
resource_set.updated) + return last_update + + def get_number_of_nodes_for_resources(self, resources): + """Return the number of nodes used by all resources.""" + return sum([ + self.get_number_of_nodes_deployed_for(resource) + for resource in resources]) + + def get_progress_for_resources(self, resources): + """Return the overall progress for all resources.""" + size = 0 + total_size = 0 + for resource in resources: + resource_set = resource.get_latest_set() + if resource_set is not None: + size += resource_set.size + total_size += resource_set.total_size + if size <= 0: + # Handle division by zero + return 0 + return 100.0 * (size / float(total_size)) + + def resource_group_to_resource(self, group): + """Convert the list of resources into one resource to be used in + the UI.""" + # Calculate all of the values using all of the resources for + # this combination. + last_update = self.get_last_update_for_resources(group) + unique_size = self.calculate_unique_size_for_resources(group) + number_of_nodes = self.get_number_of_nodes_for_resources(group) + complete = self.are_all_resources_complete(group) + progress = self.get_progress_for_resources(group) + + # Set the computed attributes on the first resource as that will + # be the only one returned to the UI. + resource = group[0] + resource.arch, resource.subarch = resource.split_arch() + resource.title = self.get_resource_title(resource) + resource.complete = complete + resource.size = format_size(unique_size) + resource.last_update = last_update + resource.number_of_nodes = number_of_nodes + resource.complete = complete + if not complete: + if progress > 0: + resource.status = "Downloading %3.0f%%" % progress + resource.downloading = True + else: + resource.status = "Queued for download" + resource.downloading = False + else: + # See if all the resources exist on all the clusters. 
+ cluster_has_resources = any( + res in group for res in self.cluster_resources) + if cluster_has_resources: + resource.status = "Complete" + resource.downloading = False + else: + resource.complete = False + if self.clusters_syncing: + resource.status = "Syncing to clusters" + resource.downloading = True + else: + resource.status = "Waiting for clusters to sync" + resource.downloading = False + return resource + + def combine_resources(self, resources): + """Return a list of resources combining all of subarchitecture + resources into one resource.""" + resource_group = defaultdict(list) + for resource in resources: + arch = resource.split_arch()[0] + key = '%s/%s' % (resource.name, arch) + resource_group[key].append(resource) + return [ + self.resource_group_to_resource(group) + for _, group in resource_group.items() + ] + + def ajax(self, request, *args, **kwargs): + """Return all resources in a json object. + + This is used by the image model list on the client side to update + the status of images.""" + resources = self.combine_resources(BootResource.objects.all()) + json_resources = [ + dict( + id=resource.id, + rtype=resource.rtype, name=resource.name, + title=resource.title, arch=resource.arch, size=resource.size, + complete=resource.complete, status=resource.status, + downloading=resource.downloading, + numberOfNodes=resource.number_of_nodes, + lastUpdate=resource.last_update.strftime('%c')) + for resource in resources + ] + data = dict( + region_import_running=is_import_resources_running(), + cluster_import_running=self.clusters_syncing, + resources=json_resources) + json_data = json.dumps(data) + return HttpResponse(json_data, mimetype='application/json') + + +class ImageDeleteView(HelpfulDeleteView): + + template_name = 'maasserver/image_confirm_delete.html' + context_object_name = 'image_to_delete' + model = BootResource + + def post(self, request, *args, **kwargs): + if not request.user.is_superuser: + raise PermissionDenied() + return 
super(ImageDeleteView, self).post(request, *args, **kwargs) + + def get_object(self): + resource_id = self.kwargs.get('resource_id', None) + resource = get_object_or_404(BootResource, id=resource_id) + if resource.rtype == BOOT_RESOURCE_TYPE.SYNCED: + raise PermissionDenied() + if resource.rtype == BOOT_RESOURCE_TYPE.UPLOADED: + if 'title' in resource.extra: + resource.title = resource.extra['title'] + else: + resource.title = resource.name + else: + os, release = resource.name.split('/') + title = get_os_release_title(os, release) + if title is not None: + resource.title = title + else: + resource.title = resource.name + return resource + + def get_next_url(self): + return reverse('images') + + def name_object(self, obj): + """See `HelpfulDeleteView`.""" + title = "" + if obj.rtype == BOOT_RESOURCE_TYPE.UPLOADED: + if 'title' in obj.extra: + title = obj.extra['title'] + else: + title = obj.name + else: + os, release = obj.name.split('/') + rpc_title = get_os_release_title(os, release) + if rpc_title is not None: + title = rpc_title + else: + title = obj.name + return "%s (%s)" % (title, obj.architecture) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/nodecommissionresult.py maas-1.7.6+bzr3376/src/maasserver/views/nodecommissionresult.py --- maas-1.5.4+bzr2294/src/maasserver/views/nodecommissionresult.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/nodecommissionresult.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,59 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Views for node commissioning results.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - 'NodeCommissionResultListView', - ] - -from django.shortcuts import get_object_or_404 -from django.views.generic import DetailView -from maasserver.models import Node -from maasserver.views import PaginatedListView -from metadataserver.models import NodeCommissionResult - - -class NodeCommissionResultListView(PaginatedListView): - - template_name = 'maasserver/nodecommissionresult-list.html' - context_object_name = 'results_list' - - def get_filter_system_ids(self): - """Return the list of nodes that were selected for filtering.""" - return self.request.GET.getlist('node') - - def get_context_data(self, **kwargs): - context = super(NodeCommissionResultListView, self).get_context_data( - **kwargs) - system_ids = self.get_filter_system_ids() - if system_ids is not None and len(system_ids) > 0: - nodes = Node.objects.filter(system_id__in=system_ids) - context['nodes_filter'] = ', '.join( - sorted(node.hostname for node in nodes)) - return context - - def get_queryset(self): - results = NodeCommissionResult.objects.all() - system_ids = self.get_filter_system_ids() - if system_ids is not None and len(system_ids) > 0: - results = results.filter(node__system_id__in=system_ids) - return results.order_by('node', '-created', 'name') - - -class NodeCommissionResultView(DetailView): - - template_name = 'metadataserver/nodecommissionresult.html' - - def get_object(self): - result_id = self.kwargs.get('id') - return get_object_or_404(NodeCommissionResult, id=result_id) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/noderesult.py maas-1.7.6+bzr3376/src/maasserver/views/noderesult.py --- maas-1.5.4+bzr2294/src/maasserver/views/noderesult.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/noderesult.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,79 @@ +# 
Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Views for node commissioning/installation results.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'NodeCommissionResultListView', + ] + +from django.core.exceptions import PermissionDenied +from django.shortcuts import get_object_or_404 +from django.views.generic import DetailView +from maasserver.models import Node +from maasserver.views import PaginatedListView +from metadataserver.enum import RESULT_TYPE +from metadataserver.models import NodeResult + + +class NodeCommissionResultListView(PaginatedListView): + + template_name = 'metadataserver/nodecommissionresult_list.html' + context_object_name = 'results_list' + + def get_filter_system_ids(self): + """Return the list of nodes that were selected for filtering.""" + return self.request.GET.getlist('node') + + def get_context_data(self, **kwargs): + context = super(NodeCommissionResultListView, self).get_context_data( + **kwargs) + system_ids = self.get_filter_system_ids() + if system_ids is not None and len(system_ids) > 0: + nodes = Node.objects.filter(system_id__in=system_ids) + context['nodes_filter'] = ', '.join( + sorted(node.hostname for node in nodes)) + return context + + def get_queryset(self): + results = NodeResult.objects.filter( + result_type=RESULT_TYPE.COMMISSIONING) + system_ids = self.get_filter_system_ids() + if system_ids is not None and len(system_ids) > 0: + results = results.filter(node__system_id__in=system_ids) + return results.order_by('node', '-created', 'name') + + +class NodeCommissionResultView(DetailView): + + template_name = 'metadataserver/nodecommissionresult.html' + + def get_object(self): + result_id = self.kwargs.get('id') + result = get_object_or_404(NodeResult, id=result_id) + if not self.request.user.is_superuser and \ + 
self.request.user != result.node.owner: + raise PermissionDenied + return result + + +class NodeInstallResultView(DetailView): + + template_name = 'metadataserver/nodeinstallresult.html' + + def get_object(self): + result_id = self.kwargs.get('id') + result = get_object_or_404(NodeResult, id=result_id) + if not self.request.user.is_superuser and \ + self.request.user != result.node.owner: + raise PermissionDenied + return result diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/nodes.py maas-1.7.6+bzr3376/src/maasserver/views/nodes.py --- maas-1.5.4+bzr2294/src/maasserver/views/nodes.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/nodes.py 2015-07-10 01:27:14.000000000 +0000 @@ -17,6 +17,7 @@ 'MacAdd', 'MacDelete', 'NodeDelete', + 'NodeEventListView', 'NodeListView', 'NodePreseedView', 'NodeView', @@ -25,18 +26,27 @@ ] from cgi import escape +import json +import logging +import re +from textwrap import dedent from urllib import urlencode -from django.conf import settings as django_settings from django.contrib import messages -from django.core.exceptions import PermissionDenied from django.core.urlresolvers import reverse -from django.http import QueryDict +from django.db.models import Q +from django.http import ( + HttpResponse, + QueryDict, + ) from django.shortcuts import ( get_object_or_404, render_to_response, ) -from django.template import RequestContext +from django.template import ( + loader, + RequestContext, + ) from django.utils.safestring import mark_safe from django.views.generic import ( CreateView, @@ -48,16 +58,14 @@ ProcessFormView, ) from lxml import etree -from maasserver import logger from maasserver.clusterrpc.power_parameters import get_power_types from maasserver.enum import ( + NODE_BOOT, NODE_PERMISSION, NODE_STATUS, + NODE_STATUS_CHOICES_DICT, ) -from maasserver.exceptions import ( - MAASAPIException, - NoRabbit, - ) +from maasserver.exceptions import MAASAPIException from maasserver.forms import ( 
BulkNodeActionForm, get_action_form, @@ -65,13 +73,17 @@ MACAddressForm, SetZoneBulkAction, ) -from maasserver.messages import MESSAGING from maasserver.models import ( MACAddress, Node, + StaticIPAddress, Tag, ) from maasserver.models.config import Config +from maasserver.models.event import ( + Event, + EventType, + ) from maasserver.models.nodeprobeddetails import get_single_probed_details from maasserver.node_action import ACTIONS_DICT from maasserver.node_constraint_filter_forms import ( @@ -81,30 +93,31 @@ from maasserver.preseed import ( get_enlist_preseed, get_preseed, + OS_WITH_IPv6_SUPPORT, ) from maasserver.third_party_drivers import get_third_party_driver +from maasserver.utils.converters import XMLToYAML from maasserver.views import ( HelpfulDeleteView, PaginatedListView, ) -from metadataserver.models import NodeCommissionResult +from metadataserver.enum import RESULT_TYPE +from metadataserver.models import NodeResult +from netaddr import IPAddress from provisioningserver.tags import merge_details_cleanly -from textwrap import dedent - -def get_longpoll_context(): - messaging = MESSAGING.get() - if messaging is not None and django_settings.LONGPOLL_PATH is not None: - try: - return { - 'longpoll_queue': messaging.getQueue().name, - 'LONGPOLL_PATH': django_settings.LONGPOLL_PATH, - } - except NoRabbit as e: - logger.warn("Could not connect to RabbitMQ: %s", e) - return {} - else: - return {} +# Fields on the Node model that will be searched. 
+NODE_SEARCH_FIELDS = { + 'status': 'status__in', + 'hostname': 'hostname__icontains', + 'owner': 'owner__username__icontains', + 'arch': 'architecture__icontains', + 'zone': 'zone__name__icontains', + 'power': 'power_state__icontains', + 'cluster': 'nodegroup__name__icontains', + 'tag': 'tags__name__icontains', + 'mac': 'macaddress__mac_address__icontains', + } def _parse_constraints(query_string): @@ -152,7 +165,7 @@ (not_actionable, not_actionable_templates), (not_permitted, not_permitted_templates)] message = [] - for number, message_templates in number_message: + for index, (number, message_templates) in enumerate(number_message): singular, plural = message_templates if number != 0: message_template = singular if number == 1 else plural @@ -160,7 +173,8 @@ # Override the action name so that only the first sentence will # contain the full name of the action. action_name = 'It' - return ' '.join(message) + level = index + return ' '.join(message), ('info', 'warning', 'error')[level] def prefetch_nodes_listing(nodes_query): @@ -192,11 +206,104 @@ return mark_safe("[\n%s\n]" % ',\n'.join(names)) +def node_to_dict(node, event_log_count=0): + """Convert `Node` to a dictionary. + + :param event_log_count: Number of entries from the event log to add to + the dictionary. 
+ """ + if node.owner is None: + owner = "" + else: + owner = '%s' % node.owner + pxe_mac = node.get_pxe_mac() + node_dict = dict( + id=node.id, + system_id=node.system_id, + url=reverse('node-view', args=[node.system_id]), + hostname=node.hostname, + architecture=node.architecture, + fqdn=node.fqdn, + status=node.display_status(), + owner=owner, + cpu_count=node.cpu_count, + memory=node.display_memory(), + storage=node.display_storage(), + power_state=node.power_state, + zone=node.zone.name, + zone_url=reverse('zone-view', args=[node.zone.name]), + mac=None if pxe_mac is None else pxe_mac.mac_address.get_raw(), + vendor=node.get_pxe_mac_vendor(), + macs=[mac.mac_address.get_raw() for mac in node.get_extra_macs()], + ) + if event_log_count != 0: + # Add event information to the generated node dictionary. We exclude + # debug after we calculate the count, so we show the correct total + # number of events. + node_events = Event.objects.filter(node=node) + total_num_events = node_events.count() + + # We fetch the IDs of the EventTypes that are non-DEBUG here + # because in MAAS 1.7 the EventType.level field is missing an + # index. That makes querying for Events whose EventType has a + # non-DEBUG level (using an INNER JOIN) slow at scale. Doing it + # this way speeds things up considerably. + non_debug_event_type_ids = EventType.objects.exclude( + level=logging.DEBUG).values_list('id', flat=True) + non_debug_events = node_events.filter( + type__id__in=non_debug_event_type_ids).order_by('-id') + if event_log_count > 0: + # Limit the number of events. 
+ events = non_debug_events.all()[:event_log_count] + displayed_events_count = len(events) + node_dict['events'] = dict( + total=total_num_events, + count=displayed_events_count, + events=[event_to_dict(event) for event in events], + more_url=reverse('node-event-list-view', args=[node.system_id]), + ) + return node_dict + + +def event_to_dict(event): + """Convert `Event` to a dictionary.""" + return dict( + id=event.id, + level=event.type.level_str, + created=event.created.strftime('%a, %d %b. %Y %H:%M:%S'), + type=event.type.description, + description=event.description + ) + + +def convert_query_status(value): + """Convert the given value into a list of status integers.""" + value = value.lower() + ids = [] + for status_id, status_text in NODE_STATUS_CHOICES_DICT.items(): + status_text = status_text.lower() + if value in status_text: + ids.append(status_id) + if len(ids) == 0: + return None + return ids + + class NodeListView(PaginatedListView, FormMixin, ProcessFormView): context_object_name = "node_list" form_class = BulkNodeActionForm - sort_fields = ('hostname', 'status', 'zone') + sort_fields = ( + 'hostname', 'status', 'owner', 'cpu_count', + 'memory', 'storage', 'zone') + late_sort_fields = { + 'primary_mac': ( + lambda node1, node2: cmp( + unicode(node1.get_primary_mac()), + unicode(node2.get_primary_mac()), + ) + ), + } def populate_modifiers(self, request): self.query = request.GET.get("query") @@ -206,6 +313,9 @@ def get(self, request, *args, **kwargs): """Handle a GET request.""" + if request.is_ajax(): + return self.handle_ajax_request(request, *args, **kwargs) + self.populate_modifiers(request) if Config.objects.get_config("enable_third_party_drivers"): @@ -225,7 +335,7 @@ These are sorting and search option we want a POST request to preserve so that the display after a POST request is similar to the display before the request.""" - return ["dir", "query", "page", "sort"] + return ["dir", "query", "test", "page", "sort"] def 
get_preserved_query(self): params = { @@ -266,8 +376,8 @@ action_class = SetZoneBulkAction else: action_class = ACTIONS_DICT[form.cleaned_data['action']] - message = message_from_form_stats(action_class, *stats) - messages.info(self.request, message) + message, level = message_from_form_stats(action_class, *stats) + getattr(messages, level)(self.request, message) return super(NodeListView, self).form_valid(form) def _compose_sort_order(self): @@ -284,10 +394,9 @@ if self.sort_dir == 'desc': custom_order = '-%s' % custom_order order_by = (custom_order, ) - return order_by + ('-created', ) - def _constrain_nodes(self, nodes_query): + def _constrain_nodes(self, nodes_query, query): """Filter the given nodes query by user-specified constraints. If the specified constraints are invalid, this will set an error and @@ -296,7 +405,7 @@ :param nodes_query: A query set of nodes. :return: A query set of nodes that returns a subset of `nodes_query`. """ - data = _parse_constraints(self.query) + data = _parse_constraints(query) form = AcquireNodeForm.Strict(data=data) # Change the field names of the AcquireNodeForm object to # conform to Juju's naming. @@ -309,13 +418,97 @@ for field, errors in form.errors.items()]) return Node.objects.none() + def _query_all_fields(self, term): + """Build query that will search all fields in the node table.""" + sub_query = Q() + for field, query_term in NODE_SEARCH_FIELDS.items(): + if field == 'status': + status_ids = convert_query_status(term) + if status_ids is None: + continue + sub_query = sub_query | Q(**{query_term: status_ids}) + else: + sub_query = sub_query | Q(**{query_term: term}) + return sub_query + + def _query_specific_field(self, field, value): + """Build query that will search the specific field from the given term + in the node table. + + This supports the term as "field:value", allowing users to be + specific on which field they want to search. 
+ """ + if field == 'status': + value = convert_query_status(value) + if value is None: + return Q() + # Perform the query based on the ORM matcher specified + # in NODE_SEARCH_FIELDS. + if field in NODE_SEARCH_FIELDS: + return Q(**{NODE_SEARCH_FIELDS[field]: value}) + return Q() + + def _query_mac_address_field(self, term): + """Build query that will search the MAC addresses in the node table. + + Note: If you want to query the mac address using only the first + two octets, then you need to use 'mac:aa:bb' or the first octet will + be mistaken as the field to search. + """ + term = term.replace('mac:', '') + return Q(**{NODE_SEARCH_FIELDS['mac']: term}) + + def _search_nodes(self, nodes_query): + """Filter the given nodes query by searching field data. + + The search query is substring matched non-case sensitively + against some fields in the nodes. See `NODE_SEARCH_FIELDS`. + + :param nodes_query: A queryset of nodes. + :return: A query set of nodes that returns a subset of `nodes_query`. + """ + # Split the query into different terms. + terms = self.query.split(' ') + + # If any of the terms contain '=' then its a juju constraint and all + # other terms not using '=' are ignored. + constraint_terms = [ + term + for term in terms + if '=' in term + ] + if len(constraint_terms) > 0: + return self._constrain_nodes( + nodes_query, ' '.join(constraint_terms)) + + # Loop through the terms building the search query. + query = Q() + for term in terms: + colon_count = term.count(':') + if colon_count == 0: + query = query & self._query_all_fields(term) + elif colon_count == 1: + field, value = term.split(':', 1) + if field == '': + # In the case the user miss typed and placed a colon at + # the beginning of the term without the field, just search + # all fields with the value. 
+ query = query & self._query_all_fields(value) + else: + query = query & self._query_specific_field(field, value) + elif colon_count > 1: + query = query & self._query_mac_address_field(term) + query = nodes_query.filter(query) + return query.distinct() + def get_queryset(self): nodes = Node.objects.get_nodes( user=self.request.user, perm=NODE_PERMISSION.VIEW) nodes = nodes.order_by(*self._compose_sort_order()) if self.query: - nodes = self._constrain_nodes(nodes) - return prefetch_nodes_listing(nodes) + nodes = self._search_nodes(nodes) + nodes = prefetch_nodes_listing(nodes) + return nodes def _prepare_sort_links(self): """Returns 2 dicts, with sort fields as keys and @@ -323,13 +516,14 @@ """ # Build relative URLs for the links, just with the params - links = {field: '?' for field in self.sort_fields} - classes = {field: 'sort-none' for field in self.sort_fields} + fields = self.sort_fields + tuple(self.late_sort_fields.keys()) + links = {field: '?' for field in fields} + classes = {field: 'sort-none' for field in fields} params = self.request.GET.copy() reverse_dir = 'asc' if self.sort_dir == 'desc' else 'desc' - for field in self.sort_fields: + for field in fields: params['sort'] = field if field == self.sort_by: params['dir'] = reverse_dir @@ -341,9 +535,21 @@ return links, classes + def late_sort(self, context): + """Sorts the node_list with sorting arguments that require + late sorting. 
+ """ + node_list = context['node_list'] + reverse = (self.sort_dir == 'desc') + cmp_func = self.late_sort_fields.get(self.sort_by) + if cmp_func is not None: + node_list = sorted(node_list, cmp=cmp_func, reverse=reverse) + context['node_list'] = node_list + return context + def get_context_data(self, **kwargs): context = super(NodeListView, self).get_context_data(**kwargs) - context.update(get_longpoll_context()) + context = self.late_sort(context) form_class = self.get_form_class() form = self.get_form(form_class) context["preserved_query"] = self.get_preserved_query() @@ -356,6 +562,22 @@ context['power_types'] = generate_js_power_types() return context + def handle_ajax_request(self, request): + """JSON response to update the nodes listing. + + :param id: An list of system ids. Only nodes with + matching system ids will be returned. + """ + match_ids = request.GET.getlist('id') + if len(match_ids) == 0: + nodes = [] + else: + nodes = Node.objects.get_nodes( + request.user, NODE_PERMISSION.VIEW, ids=match_ids) + nodes = prefetch_nodes_listing(nodes) + nodes = [node_to_dict(node) for node in nodes] + return HttpResponse(json.dumps(nodes), mimetype='application/json') + def enlist_preseed_view(request): """View method to display the enlistment preseed.""" @@ -440,6 +662,12 @@ href="%s#third_party_drivers">settings page. """) +UNCONFIGURED_IPS_NOTICE = dedent(""" + Automatic configuration of IPv6 addresses is currently only supported on + Ubuntu, using the fast installer. To activate the IPv6 address(es) shown + here, configure them in the installed operating system. + """) + def construct_third_party_drivers_notice(user_is_admin): """Build and return the notice about third party drivers. @@ -469,6 +697,39 @@ def get_form_class(self): return get_action_form(self.request.user, self.request) + # The number of events shown on the node view page. 
+ number_of_events_shown = 5 + + def get(self, request, *args, **kwargs): + """Handle a GET request.""" + if request.is_ajax(): + return self.handle_ajax_request(request, *args, **kwargs) + return super(NodeView, self).get(request, *args, **kwargs) + + def warn_unconfigured_ip_addresses(self, node): + """Should the UI warn about unconfigured IPv6 addresses on the node? + + Static IPv6 addresses are configured on the node using Curtin. But + this is not yet supported for all operating systems and installers. + If a node has IPv6 addresses assigned but is not being deployed in a + way that supports configuring them, the node page should show a warning + to say that the user will need to configure the node to use those + addresses. + + :return: Bool: should the UI show this warning? + """ + supported_os = (node.get_osystem() in OS_WITH_IPv6_SUPPORT) + if supported_os and node.boot_type == NODE_BOOT.FASTPATH: + # MAAS knows how to configure IPv6 addresses on an Ubuntu node + # installed with the fast installer. No warning needed. + return False + # For other installs, we need the warning if and only if the node has + # static IPv6 addresses. 
+ static_ips = StaticIPAddress.objects.filter(macaddress__node=node) + return any( + IPAddress(static_ip.ip).version == 6 + for static_ip in static_ips) + def get_context_data(self, **kwargs): context = super(NodeView, self).get_context_data(**kwargs) node = self.get_object() @@ -478,10 +739,16 @@ messages.info(self.request, NODE_BOOT_INFO) if node.power_type == '': messages.error(self.request, NO_POWER_SET) + if self.warn_unconfigured_ip_addresses(node): + messages.warning(self.request, UNCONFIGURED_IPS_NOTICE) + context['unconfigured_ips_warning'] = UNCONFIGURED_IPS_NOTICE + context['error_text'] = ( - node.error if node.status == NODE_STATUS.FAILED_TESTS else None) + node.error if node.status == NODE_STATUS.FAILED_COMMISSIONING + else None) context['status_text'] = ( - node.error if node.status != NODE_STATUS.FAILED_TESTS else None) + node.error if node.status != NODE_STATUS.FAILED_COMMISSIONING + else None) kernel_opts = node.get_effective_kernel_options() context['kernel_opts'] = { 'is_global': kernel_opts[0] is None, @@ -496,18 +763,42 @@ # the call to get_single_probed_details() because here the # details will be guaranteed well-formed. 
if len(probed_details.xpath('/*/*')) == 0: - context["probed_details"] = None + context['probed_details_xml'] = None + context['probed_details_yaml'] = None else: - context["probed_details"] = etree.tostring( + context['probed_details_xml'] = etree.tostring( probed_details, encoding=unicode, pretty_print=True) - - results = NodeCommissionResult.objects.filter(node=node).count() - context['nodecommissionresults'] = results + context['probed_details_yaml'] = XMLToYAML( + etree.tostring( + probed_details, encoding=unicode, + pretty_print=True)).convert() + + commissioning_results = NodeResult.objects.filter( + node=node, result_type=RESULT_TYPE.COMMISSIONING).count() + context['nodecommissionresults'] = commissioning_results + + installation_results = NodeResult.objects.filter( + node=node, result_type=RESULT_TYPE.INSTALLATION) + if len(installation_results) > 1: + for result in installation_results: + result.name = re.sub('[_.]', ' ', result.name) + context['nodeinstallresults'] = installation_results + elif len(installation_results) == 1: + installation_results[0].name = "install log" + context['nodeinstallresults'] = installation_results context['third_party_drivers_enabled'] = Config.objects.get_config( 'enable_third_party_drivers') context['drivers'] = get_third_party_driver(node) + event_list = ( + Event.objects.filter(node=self.get_object()) + .exclude(type__level=logging.DEBUG) + .order_by('-id')[:self.number_of_events_shown]) + context['event_list'] = event_list + context['event_count'] = Event.objects.filter( + node=self.get_object()).count() + return context def dispatch(self, *args, **kwargs): @@ -525,6 +816,43 @@ def get_success_url(self): return reverse('node-view', args=[self.get_object().system_id]) + def render_node_actions(self, request): + """Render the HTML for all the available node actions.""" + template = loader.get_template('maasserver/node_actions.html') + self.object = self.get_object() + context = { + 'node': self.object, + 'can_edit': 
self.request.user.has_perm( + NODE_PERMISSION.EDIT, self.object), + 'form': self.get_form(self.get_form_class()), + } + return template.render(RequestContext(request, context)) + + def handle_ajax_request(self, request, *args, **kwargs): + """JSON response to update the node view.""" + node = self.get_object() + node = node_to_dict( + node, event_log_count=self.number_of_events_shown) + node['action_view'] = self.render_node_actions(request) + return HttpResponse(json.dumps(node), mimetype='application/json') + + +class NodeEventListView(NodeViewMixin, PaginatedListView): + + context_object_name = "event_list" + + template_name = "maasserver/node_event_list.html" + + def get_queryset(self): + return Event.objects.filter( + node=self.get_object()).order_by('-id') + + def get_context_data(self, **kwargs): + context = super(NodeEventListView, self).get_context_data(**kwargs) + node = self.get_object() + context['node'] = node + return context + class NodeEdit(UpdateView): @@ -540,11 +868,18 @@ def get_form_class(self): return get_node_edit_form(self.request.user) + def get_has_owner(self): + node = self.get_object() + if node is None or node.owner is None: + return mark_safe("false") + return mark_safe("true") + def get_form_kwargs(self): # This is here so the request can be passed to the form. The # form needs it because it sets error messages for the UI. kwargs = super(NodeEdit, self).get_form_kwargs() kwargs['request'] = self.request + kwargs['ui_submission'] = True return kwargs def get_success_url(self): @@ -554,6 +889,9 @@ context = super(NodeEdit, self).get_context_data(**kwargs) context['power_types'] = generate_js_power_types( self.get_object().nodegroup) + # 'os_release' lets us know if we should render the `OS` + # and `Release` choice fields in the UI. 
+ context['os_release'] = self.get_has_owner() return context @@ -568,8 +906,6 @@ node = Node.objects.get_node_or_404( system_id=system_id, user=self.request.user, perm=NODE_PERMISSION.ADMIN) - if node.status == NODE_STATUS.ALLOCATED: - raise PermissionDenied() return node def get_next_url(self): diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/prefs.py maas-1.7.6+bzr3376/src/maasserver/views/prefs.py --- maas-1.5.4+bzr2294/src/maasserver/views/prefs.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/prefs.py 2015-07-10 01:27:14.000000000 +0000 @@ -31,8 +31,12 @@ from maasserver.forms import ( ProfileForm, SSHKeyForm, + SSLKeyForm, + ) +from maasserver.models import ( + SSHKey, + SSLKey, ) -from maasserver.models import SSHKey from maasserver.views import ( HelpfulDeleteView, process_form, @@ -57,6 +61,41 @@ return reverse('prefs') +class SSLKeyCreateView(CreateView): + + form_class = SSLKeyForm + template_name = 'maasserver/prefs_add_sslkey.html' + + def get_form_kwargs(self): + kwargs = super(SSLKeyCreateView, self).get_form_kwargs() + kwargs['user'] = self.request.user + return kwargs + + def form_valid(self, form): + messages.info(self.request, "SSL key added.") + return super(SSLKeyCreateView, self).form_valid(form) + + def get_success_url(self): + return reverse('prefs') + + +class SSLKeyDeleteView(HelpfulDeleteView): + + template_name = 'maasserver/prefs_confirm_delete_sslkey.html' + context_object_name = 'sslkey' + model = SSLKey + + def get_object(self): + keyid = self.kwargs.get('keyid', None) + key = get_object_or_404(SSLKey, id=keyid) + if key.user != self.request.user: + raise PermissionDenied("Can't delete this key. 
It's not yours.") + return key + + def get_next_url(self): + return reverse('prefs') + + class SSHKeyDeleteView(HelpfulDeleteView): template_name = 'maasserver/prefs_confirm_delete_sshkey.html' diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/rpc.py maas-1.7.6+bzr3376/src/maasserver/views/rpc.py --- maas-1.5.4+bzr2294/src/maasserver/views/rpc.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/rpc.py 2015-07-10 01:27:14.000000000 +0000 @@ -29,15 +29,21 @@ def info(request): """View returning a JSON document with information about RPC endpoints. - Currently the only information returned is a list of `(host, port)` - tuples on which the region has listening RPC endpoints. + Currently the only information returned is a list of `(host, port)` tuples + on which the region has listening RPC endpoints. + + When the `rpc-advertise` service is not running this returns `None` + instead of the list of event-loop endpoints. This denotes something along + the lines of "I don't know". The cluster should not act on this, and + instead ask again later. + """ try: advertiser = eventloop.services.getServiceNamed("rpc-advertise") except KeyError: # RPC advertising service has not been created, so we declare # that there are no endpoints *at all*. - endpoints = {} + endpoints = None else: if advertiser.running: endpoints = {} @@ -49,7 +55,7 @@ else: # RPC advertising service is not running, so we declare that # there are no endpoints *at all*. - endpoints = {} + endpoints = None # Each endpoint is an entry point into this event-loop. 
info = {"eventloops": endpoints} diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/settings_license_keys.py maas-1.7.6+bzr3376/src/maasserver/views/settings_license_keys.py --- maas-1.5.4+bzr2294/src/maasserver/views/settings_license_keys.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/settings_license_keys.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,95 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""License Key Settings views.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "LicenseKeyCreate", + "LicenseKeyDelete", + "LicenseKeyEdit", + ] + +from django.contrib import messages +from django.core.urlresolvers import reverse +from django.http import HttpResponseRedirect +from django.shortcuts import get_object_or_404 +from django.views.generic import ( + CreateView, + DeleteView, + UpdateView, + ) +from maasserver.forms import LicenseKeyForm +from maasserver.models import LicenseKey + +# The anchor of the license keys slot on the settings page. +LICENSE_KEY_ANCHOR = 'license_keys' + + +class LicenseKeyDelete(DeleteView): + + template_name = ( + 'maasserver/settings_confirm_delete_license_key.html') + context_object_name = 'license_key_to_delete' + + def get_object(self): + osystem = self.kwargs.get('osystem', None) + distro_series = self.kwargs.get('distro_series', None) + return get_object_or_404( + LicenseKey, osystem=osystem, distro_series=distro_series) + + def get_next_url(self): + return reverse('settings') + '#' + LICENSE_KEY_ANCHOR + + def delete(self, request, *args, **kwargs): + license_key = self.get_object() + license_key.delete() + messages.info( + request, + "License key %s/%s deleted." 
% ( + license_key.osystem, + license_key.distro_series, + )) + return HttpResponseRedirect(self.get_next_url()) + + +class LicenseKeyCreate(CreateView): + template_name = 'maasserver/settings_add_license_key.html' + form_class = LicenseKeyForm + context_object_name = 'licensekey' + + def get_success_url(self): + return reverse('settings') + '#' + LICENSE_KEY_ANCHOR + + def form_valid(self, form): + messages.info(self.request, "License key created.") + return super(LicenseKeyCreate, self).form_valid(form) + + +class LicenseKeyEdit(UpdateView): + """View for editing a license key.""" + + model = LicenseKey + form_class = LicenseKeyForm + template_name = 'maasserver/settings_edit_license_key.html' + + def get_object(self): + osystem = self.kwargs.get('osystem', None) + distro_series = self.kwargs.get('distro_series', None) + return get_object_or_404( + LicenseKey, osystem=osystem, distro_series=distro_series) + + def get_success_url(self): + return reverse('settings') + '#' + LICENSE_KEY_ANCHOR + + def form_valid(self, form): + messages.info(self.request, "License key updated.") + return super(LicenseKeyEdit, self).form_valid(form) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/settings.py maas-1.7.6+bzr3376/src/maasserver/views/settings.py --- maas-1.5.4+bzr2294/src/maasserver/views/settings.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/settings.py 2015-07-10 01:27:14.000000000 +0000 @@ -38,17 +38,30 @@ from django.views.generic.base import TemplateView from django.views.generic.detail import SingleObjectTemplateResponseMixin from django.views.generic.edit import ModelFormMixin +from maasserver.clusterrpc.osystems import gen_all_known_operating_systems from maasserver.exceptions import CannotDeleteUserException from maasserver.forms import ( + BootSourceSettingsForm, CommissioningForm, + DeployForm, + DiskErasingOnReleaseForm, EditUserForm, GlobalKernelOptsForm, MAASAndNetworkForm, NewUserCreationForm, ThirdPartyDriversForm, 
UbuntuForm, + WindowsForm, + ) +from maasserver.models import ( + BootSource, + LicenseKey, + UserProfile, + ) +from maasserver.utils.osystems import ( + get_osystem_from_osystems, + get_release_from_osystem, ) -from maasserver.models import UserProfile from maasserver.views import process_form from metadataserver.models import CommissioningScript @@ -153,9 +166,46 @@ return self.respond(request, profile_form, password_form) +def has_osystems_supporting_license_keys(osystems): + """Return True if the given osystems supports releases with license keys. + """ + for osystem in osystems: + for release in osystem['releases']: + if release['requires_license_key']: + return True + return False + + +def set_license_key_titles(license_key, osystems): + """Sets the osystem_title and distro_series_title field on the + license_key. + + Uses the given "osystems" to get the titles. + """ + osystem = get_osystem_from_osystems(osystems, license_key.osystem) + if osystem is None: + license_key.osystem_title = license_key.osystem + license_key.distro_series_title = license_key.distro_series + return + license_key.osystem_title = osystem['title'] + release = get_release_from_osystem(osystem, license_key.distro_series) + if release is None: + license_key.distro_series_title = license_key.distro_series + return + license_key.distro_series_title = release['title'] + + def settings(request): user_list = UserProfile.objects.all_users().order_by('username') + # Process boot source settings form. + show_boot_source = BootSource.objects.count() < 2 + boot_source_form, response = process_form( + request, BootSourceSettingsForm, reverse('settings'), + 'boot_source', "Configuration updated.") + if response is not None: + return response + # Process Third Party Drivers form. third_party_drivers_form, response = process_form( request, ThirdPartyDriversForm, reverse('settings'), @@ -163,6 +213,13 @@ if response is not None: return response + # Process disk erasing on release form. 
+ disk_erasing_on_release_form, response = process_form( + request, DiskErasingOnReleaseForm, reverse('settings'), + 'disk_erasing_on_release', "Configuration updated.") + if response is not None: + return response + # Process the MAAS & network form. maas_and_network_form, response = process_form( request, MAASAndNetworkForm, reverse('settings'), 'maas_and_network', @@ -177,6 +234,13 @@ if response is not None: return response + # Process the Deploy form. + deploy_form, response = process_form( + request, DeployForm, reverse('settings'), 'deploy', + "Configuration updated.") + if response is not None: + return response + # Process the Ubuntu form. ubuntu_form, response = process_form( request, UbuntuForm, reverse('settings'), 'ubuntu', @@ -184,6 +248,13 @@ if response is not None: return response + # Process the Windows form. + windows_form, response = process_form( + request, WindowsForm, reverse('settings'), 'windows', + "Configuration updated.") + if response is not None: + return response + # Process the Global Kernel Opts form. kernelopts_form, response = process_form( request, GlobalKernelOptsForm, reverse('settings'), 'kernelopts', @@ -194,15 +265,29 @@ # Commissioning scripts. 
commissioning_scripts = CommissioningScript.objects.all() + # License keys w/ titles for osystem and distro_series + osystems = list(gen_all_known_operating_systems()) + show_license_keys = has_osystems_supporting_license_keys(osystems) + license_keys = LicenseKey.objects.all() + for license_key in license_keys: + set_license_key_titles(license_key, osystems) + return render_to_response( 'maasserver/settings.html', { 'user_list': user_list, 'commissioning_scripts': commissioning_scripts, + 'show_license_keys': show_license_keys, + 'license_keys': license_keys, 'maas_and_network_form': maas_and_network_form, + 'show_boot_source': show_boot_source, + 'boot_source_form': boot_source_form, 'third_party_drivers_form': third_party_drivers_form, + 'disk_erasing_on_release_form': disk_erasing_on_release_form, 'commissioning_form': commissioning_form, + 'deploy_form': deploy_form, 'ubuntu_form': ubuntu_form, + 'windows_form': windows_form, 'kernelopts_form': kernelopts_form, }, context_instance=RequestContext(request)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tags.py maas-1.7.6+bzr3376/src/maasserver/views/tags.py --- maas-1.5.4+bzr2294/src/maasserver/views/tags.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tags.py 2015-07-10 01:27:14.000000000 +0000 @@ -43,7 +43,8 @@ user=self.request.user, perm=NODE_PERMISSION.VIEW, from_nodes=self.tag.node_set.all()) nodes = nodes.order_by('-created') - return prefetch_nodes_listing(nodes) + nodes = prefetch_nodes_listing(nodes) + return nodes def get_context_data(self, **kwargs): context = super(TagView, self).get_context_data(**kwargs) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_account.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_account.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_account.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_account.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# 
Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test maasserver account views.""" @@ -17,7 +17,10 @@ from django.conf import settings from django.contrib.auth import SESSION_KEY from django.core.urlresolvers import reverse -from lxml.html import fromstring +from lxml.html import ( + fromstring, + tostring, + ) from maasserver.testing import ( extract_redirect, get_content_links, @@ -29,7 +32,7 @@ class TestLogin(MAASServerTestCase): def test_login_contains_input_tags_if_user(self): - factory.make_user() + factory.make_User() response = self.client.get('/accounts/login/') doc = fromstring(response.content) self.assertFalse(response.context['no_users']) @@ -37,19 +40,35 @@ self.assertEqual(1, len(doc.cssselect('input#id_password'))) def test_login_displays_createadmin_message_if_no_user(self): - path = factory.getRandomString() + path = factory.make_string() self.patch(settings, 'MAAS_CLI', path) response = self.client.get('/accounts/login/') self.assertTrue(response.context['no_users']) self.assertEqual(path, response.context['create_command']) def test_login_redirects_when_authenticated(self): - password = factory.getRandomString() - user = factory.make_user(password=password) + password = factory.make_string() + user = factory.make_User(password=password) self.client.login(username=user.username, password=password) response = self.client.get('/accounts/login/') self.assertEqual('/', extract_redirect(response)) + def test_login_sets_autocomplete_off_in_production(self): + self.patch(settings, 'DEBUG', False) + factory.make_User() + response = self.client.get('/accounts/login/') + doc = fromstring(response.content) + form = doc.cssselect("form")[0] + self.assertIn('autocomplete="off"', tostring(form)) + + def test_login_sets_autocomplete_on_in_debug_mode(self): + self.patch(settings, 'DEBUG', True) 
+ factory.make_User() + response = self.client.get('/accounts/login/') + doc = fromstring(response.content) + form = doc.cssselect("form")[0] + self.assertNotIn('autocomplete="off"', tostring(form)) + class TestLogout(MAASServerTestCase): diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_boot_image_list.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_boot_image_list.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_boot_image_list.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_boot_image_list.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,78 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Test maasserver boot image list view.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -import itertools - -from django.core.urlresolvers import reverse -from lxml.html import fromstring -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase -from maasserver.views.clusters import BootImagesListView -from testtools.matchers import ContainsAll - - -class BootImageListTest(MAASServerTestCase): - - def test_contains_boot_image_list(self): - self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group() - images = [ - factory.make_boot_image(nodegroup=nodegroup) for _ in range(3)] - response = self.client.get( - reverse('cluster-bootimages-list', args=[nodegroup.uuid])) - self.assertEqual( - httplib.OK, response.status_code, response.content) - items_in_page = [ - [ - '%s' % image.id, - image.label, - image.purpose, - image.release, - image.subarchitecture, - image.architecture, - '%s' % image.updated.year, - ] for image in images] - self.assertThat( - response.content, ContainsAll(itertools.chain(*items_in_page))) - - def 
test_listing_is_paginated(self): - self.patch(BootImagesListView, "paginate_by", 3) - self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group() - # Create 4 images. - [ - factory.make_boot_image(nodegroup=nodegroup) for _ in range(4)] - response = self.client.get( - reverse('cluster-bootimages-list', args=[nodegroup.uuid])) - self.assertEqual(httplib.OK, response.status_code) - doc = fromstring(response.content) - self.assertEqual( - 1, len(doc.cssselect('div.pagination')), - "Couldn't find pagination tag.") - - def test_displays_warning_if_boot_image_list_is_empty(self): - # Create boot images in another nodegroup. - [factory.make_boot_image() for _ in range(3)] - self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group() - response = self.client.get( - reverse('cluster-bootimages-list', args=[nodegroup.uuid])) - self.assertEqual(httplib.OK, response.status_code) - doc = fromstring(response.content) - self.assertEqual( - 1, len(doc.cssselect('#no_boot_images_warning')), - "Warning about missing images not present") diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_clusters.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_clusters.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_clusters.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_clusters.py 2015-07-10 01:27:14.000000000 +0000 @@ -19,33 +19,27 @@ from django.core.urlresolvers import reverse from lxml.html import fromstring from maasserver.enum import ( + NODEGROUP_STATE, NODEGROUP_STATUS, NODEGROUP_STATUS_CHOICES, NODEGROUPINTERFACE_MANAGEMENT, ) from maasserver.models import ( + BootResource, NodeGroup, - nodegroup as nodegroup_module, NodeGroupInterface, ) from maasserver.testing import ( extract_redirect, get_content_links, - reload_object, ) from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase -from 
maasserver.utils import map_enum from maasserver.views.clusters import ClusterListView -from mock import ( - ANY, - call, - ) +from provisioningserver.utils.enum import map_enum from testtools.matchers import ( - AllMatch, - Contains, ContainsAll, - Equals, HasLength, MatchesStructure, ) @@ -67,7 +61,7 @@ def test_cluster_listing_contains_links_to_manipulate_clusters(self): self.client_log_in(as_admin=True) nodegroups = { - factory.make_node_group(status=self.status) + factory.make_NodeGroup(status=self.status) for _ in range(3) } links = get_content_links(self.client.get(self.get_url())) @@ -89,9 +83,9 @@ def test_make_title_entry_returns_link_for_other_status(self): # If the entry's status is different from the view's status, # the returned entry is a link. - other_status = factory.getRandomChoice( + other_status = factory.pick_choice( NODEGROUP_STATUS_CHOICES, but_not=[self.status]) - factory.make_node_group(status=other_status) + factory.make_NodeGroup(status=other_status) link_name = ClusterListView.status_links[other_status] view = self.make_listing_view(self.status) entry = view.make_title_entry(other_status, link_name) @@ -104,7 +98,7 @@ def test_make_title_entry_returns_title_if_no_cluster(self): # If no cluster correspond to the entry's status, the returned # entry is not a link: it's a simple mention '0 clusters'. 
- other_status = factory.getRandomChoice( + other_status = factory.pick_choice( NODEGROUP_STATUS_CHOICES, but_not=[self.status]) link_name = ClusterListView.status_links[other_status] view = self.make_listing_view(self.status) @@ -115,7 +109,7 @@ def test_title_displays_number_of_clusters(self): for _ in range(3): - factory.make_node_group(status=self.status) + factory.make_NodeGroup(status=self.status) view = self.make_listing_view(self.status) status_name = NODEGROUP_STATUS_CHOICES[self.status][1] title = view.make_cluster_listing_title() @@ -129,7 +123,7 @@ for status in map_enum(NODEGROUP_STATUS).values(): if status != self.status: other_statuses.append(status) - factory.make_node_group(status=status) + factory.make_NodeGroup(status=status) for status in other_statuses: link_name = ClusterListView.status_links[status] title = view.make_cluster_listing_title() @@ -139,7 +133,7 @@ self.patch(ClusterListView, "paginate_by", 2) self.client_log_in(as_admin=True) for _ in range(3): - factory.make_node_group(status=self.status) + factory.make_NodeGroup(status=self.status) response = self.client.get(self.get_url()) self.assertEqual(httplib.OK, response.status_code) doc = fromstring(response.content) @@ -149,6 +143,76 @@ "Couldn't find pagination tag.") +class ClusterListingStateTest(MAASServerTestCase): + + scenarios = [ + ('disconnected', { + 'state': NODEGROUP_STATE.DISCONNECTED, + 'text': '-', + 'connection': '✗', + }), + ('out-of-sync', { + 'state': NODEGROUP_STATE.OUT_OF_SYNC, + 'text': NODEGROUP_STATE.OUT_OF_SYNC, + 'connection': '✓', + }), + ('syncing', { + 'state': NODEGROUP_STATE.SYNCING, + 'text': NODEGROUP_STATE.SYNCING, + 'connection': '✓', + }), + ('synced', { + 'state': NODEGROUP_STATE.SYNCED, + 'text': NODEGROUP_STATE.SYNCED, + 'connection': '✓', + }), + ] + + def test_listing_displays_connected_image_status(self): + self.client_log_in(as_admin=True) + factory.make_BootResource() + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED, 
name=self.state) + + def mock_get_state(self): + # Return a state, which is set to the name of the node. + return self.name + self.patch(NodeGroup, 'get_state', mock_get_state) + + response = self.client.get( + reverse(ClusterListView.status_links[NODEGROUP_STATUS.ACCEPTED])) + document = fromstring(response.content) + images_col = document.xpath( + "//td[@id='%s_images']" % nodegroup.uuid)[0] + connection_col = document.xpath( + "//td[@id='%s_connection']" % nodegroup.uuid)[0] + self.assertEqual( + self.text, images_col.text_content().strip()) + self.assertEqual( + self.connection, connection_col.text_content().strip()) + + +class ClusterListingNoImagesTest(MAASServerTestCase): + + def test_listing_displays_no_images_available(self): + self.client_log_in(as_admin=True) + BootResource.objects.all().delete() + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED) + + def mock_get_state(self): + return NODEGROUP_STATE.OUT_OF_SYNC + self.patch(NodeGroup, 'get_state', mock_get_state) + + response = self.client.get( + reverse(ClusterListView.status_links[NODEGROUP_STATUS.ACCEPTED])) + document = fromstring(response.content) + images_col = document.xpath( + "//td[@id='%s_images']" % nodegroup.uuid)[0] + self.assertEqual( + "No images available", images_col.text_content().strip()) + + class ClusterListingAccess(MAASServerTestCase): def test_admin_sees_cluster_tab(self): @@ -168,7 +232,7 @@ def test_pending_listing_contains_form_to_accept_all_nodegroups(self): self.client_log_in(as_admin=True) - factory.make_node_group(status=NODEGROUP_STATUS.PENDING), + factory.make_NodeGroup(status=NODEGROUP_STATUS.PENDING), response = self.client.get(reverse('cluster-list-pending')) doc = fromstring(response.content) forms = doc.cssselect('form#accept_all_pending_nodegroups') @@ -176,7 +240,7 @@ def test_pending_listing_contains_form_to_reject_all_nodegroups(self): self.client_log_in(as_admin=True) - factory.make_node_group(status=NODEGROUP_STATUS.PENDING), + 
factory.make_NodeGroup(status=NODEGROUP_STATUS.PENDING), response = self.client.get(reverse('cluster-list-pending')) doc = fromstring(response.content) forms = doc.cssselect('form#reject_all_pending_nodegroups') @@ -185,8 +249,8 @@ def test_pending_listing_accepts_all_pending_nodegroups_POST(self): self.client_log_in(as_admin=True) nodegroups = { - factory.make_node_group(status=NODEGROUP_STATUS.PENDING), - factory.make_node_group(status=NODEGROUP_STATUS.PENDING), + factory.make_NodeGroup(status=NODEGROUP_STATUS.PENDING), + factory.make_NodeGroup(status=NODEGROUP_STATUS.PENDING), } response = self.client.post( reverse('cluster-list-pending'), {'mass_accept_submit': 1}) @@ -198,8 +262,8 @@ def test_pending_listing_rejects_all_pending_nodegroups_POST(self): self.client_log_in(as_admin=True) nodegroups = { - factory.make_node_group(status=NODEGROUP_STATUS.PENDING), - factory.make_node_group(status=NODEGROUP_STATUS.PENDING), + factory.make_NodeGroup(status=NODEGROUP_STATUS.PENDING), + factory.make_NodeGroup(status=NODEGROUP_STATUS.PENDING), } response = self.client.post( reverse('cluster-list-pending'), {'mass_reject_submit': 1}) @@ -211,42 +275,69 @@ class ClusterAcceptedListingTest(MAASServerTestCase): - def test_accepted_listing_import_boot_images_calls_tasks(self): + def test_warning_is_displayed_if_a_cluster_is_disconnected(self): self.client_log_in(as_admin=True) - recorder = self.patch(nodegroup_module, 'import_boot_images') - accepted_nodegroups = [ - factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED), - factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED), - ] - response = self.client.post( - reverse('cluster-list'), {'import_all_boot_images': 1}) - self.assertEqual(httplib.FOUND, response.status_code) - calls = [ - call(queue=nodegroup.work_queue, kwargs=ANY) - for nodegroup in accepted_nodegroups - ] - self.assertItemsEqual(calls, recorder.apply_async.call_args_list) + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED) + + 
self.patch( + NodeGroup, 'get_state', lambda _: NODEGROUP_STATE.DISCONNECTED) + response = self.client.get(reverse('cluster-list')) + document = fromstring(response.content) + nodegroup_row = document.xpath("//tr[@id='%s']" % nodegroup.uuid)[0] + self.assertIn('warning', nodegroup_row.get('class')) + warning_elems = ( + nodegroup_row.xpath( + """//img[@title="Warning: this cluster is disconnected."]""")) + self.assertThat( + warning_elems, HasLength(1), + "No warning about disconnected cluster.") + + def test_warning_is_displayed_if_region_is_missing_images(self): + self.client_log_in(as_admin=True) + BootResource.objects.all().delete() + nodegroup = factory.make_NodeGroup( + status=NODEGROUP_STATUS.ACCEPTED) + + self.patch( + NodeGroup, 'get_state', lambda _: NODEGROUP_STATE.OUT_OF_SYNC) + response = self.client.get(reverse('cluster-list')) + document = fromstring(response.content) + nodegroup_row = document.xpath("//tr[@id='%s']" % nodegroup.uuid)[0] + self.assertIn('warning', nodegroup_row.get('class')) + warning_elems = ( + nodegroup_row.xpath( + "//img[@title=\"Warning: this cluster cannot sync images as " + "the region doesn't have any images.\"]")) + self.assertThat( + warning_elems, HasLength(1), + "No warning about region not containing images.") - def test_a_warning_is_displayed_if_the_cluster_has_no_boot_images(self): + def test_warning_is_displayed_if_a_cluster_is_out_of_sync(self): self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group( + factory.make_BootResource() + nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ACCEPTED) + + self.patch( + NodeGroup, 'get_state', lambda _: NODEGROUP_STATE.OUT_OF_SYNC) response = self.client.get(reverse('cluster-list')) document = fromstring(response.content) nodegroup_row = document.xpath("//tr[@id='%s']" % nodegroup.uuid)[0] self.assertIn('warning', nodegroup_row.get('class')) warning_elems = ( nodegroup_row.xpath( - "//img[@title='Warning: this cluster has no boot images.']")) - 
self.assertEqual( - 1, len(warning_elems), "No warning about missing boot images.") + """//img[@title="Warning: this cluster is out-of-sync."]""")) + self.assertThat( + warning_elems, HasLength(1), + "No warning about out-of-sync cluster.") class ClusterDeleteTest(MAASServerTestCase): def test_can_delete_cluster(self): self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() delete_link = reverse('cluster-delete', args=[nodegroup.uuid]) response = self.client.post(delete_link, {'post': 'yes'}) self.assertEqual( @@ -260,24 +351,24 @@ def test_cluster_page_contains_links_to_edit_and_delete_interfaces(self): self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() interfaces = set() - for i in range(3): - interface = factory.make_node_group_interface( - nodegroup=nodegroup, - management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) - interfaces.add(interface) + for _ in range(3): + interfaces.add( + factory.make_NodeGroupInterface( + nodegroup=nodegroup, + management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED)) links = get_content_links( self.client.get(reverse('cluster-edit', args=[nodegroup.uuid]))) interface_edit_links = [ reverse( 'cluster-interface-edit', - args=[nodegroup.uuid, interface.interface]) + args=[nodegroup.uuid, interface.name]) for interface in interfaces] interface_delete_links = [ reverse( 'cluster-interface-delete', - args=[nodegroup.uuid, interface.interface]) + args=[nodegroup.uuid, interface.name]) for interface in interfaces] self.assertThat( links, @@ -285,12 +376,12 @@ def test_can_edit_cluster(self): self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() edit_link = reverse('cluster-edit', args=[nodegroup.uuid]) data = { 'cluster_name': factory.make_name('cluster_name'), 'name': factory.make_name('name'), - 'status': factory.getRandomEnum(NODEGROUP_STATUS), + 'status': 
factory.pick_enum(NODEGROUP_STATUS), } response = self.client.post(edit_link, data) self.assertEqual(httplib.FOUND, response.status_code, response.content) @@ -300,52 +391,36 @@ def test_contains_link_to_add_interface(self): self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() links = get_content_links( self.client.get(reverse('cluster-edit', args=[nodegroup.uuid]))) self.assertIn( reverse('cluster-interface-create', args=[nodegroup.uuid]), links) - def test_contains_link_to_boot_image_list(self): + def test_admin_can_disable_default_disable_ipv4_flag(self): self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group() - [factory.make_boot_image(nodegroup=nodegroup) for _ in range(3)] - response = self.client.get( - reverse('cluster-edit', args=[nodegroup.uuid])) - self.assertEqual( - httplib.OK, response.status_code, response.content) - links = get_content_links(response) - self.assertIn( - reverse('cluster-bootimages-list', args=[nodegroup.uuid]), links) - - def test_displays_warning_if_boot_image_list_is_empty(self): - # Create boot images in another nodegroup. - [factory.make_boot_image() for _ in range(3)] - self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group() - response = self.client.get( - reverse('cluster-edit', args=[nodegroup.uuid])) - self.assertEqual(httplib.OK, response.status_code) - doc = fromstring(response.content) - self.assertEqual( - 1, len(doc.cssselect('#no_boot_images_warning')), - "Warning about missing images not present") - links = get_content_links(response) - self.assertNotIn( - reverse('cluster-bootimages-list', args=[nodegroup.uuid]), links) + nodegroup = factory.make_NodeGroup(default_disable_ipv4=True) + edit_link = reverse('cluster-edit', args=[nodegroup.uuid]) + # In a UI submission, omitting a boolean means setting it to False. 
+ data = { + 'ui_submission': True, + } + response = self.client.post(edit_link, data) + self.assertEqual(httplib.FOUND, response.status_code) + self.assertFalse(reload_object(nodegroup).default_disable_ipv4) class ClusterInterfaceDeleteTest(MAASServerTestCase): def test_can_delete_cluster_interface(self): self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group() - interface = factory.make_node_group_interface( + nodegroup = factory.make_NodeGroup() + interface = factory.make_NodeGroupInterface( nodegroup=nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) delete_link = reverse( 'cluster-interface-delete', - args=[nodegroup.uuid, interface.interface]) + args=[nodegroup.uuid, interface.name]) response = self.client.post(delete_link, {'post': 'yes'}) self.assertEqual( (httplib.FOUND, reverse('cluster-edit', args=[nodegroup.uuid])), @@ -355,13 +430,13 @@ def test_interface_delete_supports_interface_alias(self): self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group( + nodegroup = factory.make_NodeGroup( management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) - interface = factory.make_node_group_interface( - nodegroup=nodegroup, interface="eth0:0") + interface = factory.make_NodeGroupInterface( + nodegroup=nodegroup, name="eth0:0") delete_link = reverse( 'cluster-interface-delete', - args=[nodegroup.uuid, interface.interface]) + args=[nodegroup.uuid, interface.name]) # The real test is that reverse() does not blow up when the # interface's name contains an alias. 
self.assertIsInstance(delete_link, (bytes, unicode)) @@ -371,13 +446,13 @@ def test_can_edit_cluster_interface(self): self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group( + nodegroup = factory.make_NodeGroup( management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) - interface = factory.make_node_group_interface( + interface = factory.make_NodeGroupInterface( nodegroup=nodegroup) edit_link = reverse( 'cluster-interface-edit', - args=[nodegroup.uuid, interface.interface]) + args=[nodegroup.uuid, interface.name]) data = factory.get_interface_fields() response = self.client.post(edit_link, data) self.assertEqual( @@ -389,13 +464,13 @@ def test_interface_edit_supports_interface_alias(self): self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group( + nodegroup = factory.make_NodeGroup( management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) - interface = factory.make_node_group_interface( - nodegroup=nodegroup, interface="eth0:0") + interface = factory.make_NodeGroupInterface( + nodegroup=nodegroup, name="eth0:0") edit_link = reverse( 'cluster-interface-edit', - args=[nodegroup.uuid, interface.interface]) + args=[nodegroup.uuid, interface.name]) # The real test is that reverse() does not blow up when the # interface's name contains an alias. 
self.assertIsInstance(edit_link, (bytes, unicode)) @@ -405,7 +480,7 @@ def test_can_create_cluster_interface(self): self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group( + nodegroup = factory.make_NodeGroup( management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) create_link = reverse( 'cluster-interface-create', args=[nodegroup.uuid]) @@ -415,37 +490,7 @@ (httplib.FOUND, reverse('cluster-edit', args=[nodegroup.uuid])), (response.status_code, extract_redirect(response))) interface = NodeGroupInterface.objects.get( - nodegroup__uuid=nodegroup.uuid, interface=data['interface']) + nodegroup__uuid=nodegroup.uuid, name=data['name']) self.assertThat( reload_object(interface), MatchesStructure.byEquality(**data)) - - -# XXX: rvb 2012-10-08 bug=1063881: apache transforms '//' into '/' in -# the urls it passes around and this happens when an interface has an empty -# name. -class ClusterInterfaceDoubleSlashBugTest(MAASServerTestCase): - - def test_edit_delete_empty_cluster_interface_when_slash_removed(self): - self.client_log_in(as_admin=True) - nodegroup = factory.make_node_group() - interface = factory.make_node_group_interface( - nodegroup=nodegroup, interface='', - management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) - edit_link = reverse( - 'cluster-interface-edit', - args=[nodegroup.uuid, interface.interface]) - delete_link = reverse( - 'cluster-interface-delete', - args=[nodegroup.uuid, interface.interface]) - links = [edit_link, delete_link] - # Just make sure that the urls contains '//'. If this is not - # true anymore, because we've refactored the urls, this test can - # problably be removed. - self.assertThat(links, AllMatch(Contains('//'))) - # Simulate what apache (when used as a frontend) does to the - # urls. 
- new_links = [link.replace('//', '/') for link in links] - response_statuses = [ - self.client.get(link).status_code for link in new_links] - self.assertThat(response_statuses, AllMatch(Equals(httplib.OK))) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_combo.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_combo.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_combo.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_combo.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test combo view.""" @@ -33,15 +33,15 @@ class TestUtilities(MAASServerTestCase): def test_get_abs_location_returns_absolute_location_if_not_None(self): - abs_location = '%s%s' % (os.path.sep, factory.getRandomString()) + abs_location = '%s%s' % (os.path.sep, factory.make_string()) self.assertEqual( abs_location, get_absolute_location(location=abs_location)) def test_get_abs_location_returns_rel_loc_if_static_root_not_none(self): - static_root = factory.getRandomString() + static_root = factory.make_string() self.patch(settings, 'STATIC_ROOT', static_root) rel_location = os.path.join( - factory.getRandomString(), factory.getRandomString()) + factory.make_string(), factory.make_string()) expected_location = os.path.join(static_root, rel_location) self.assertEqual( expected_location, get_absolute_location(location=rel_location)) @@ -49,7 +49,7 @@ def test_get_abs_location_returns_rel_loc_if_static_root_is_none(self): self.patch(settings, 'STATIC_ROOT', None) rel_location = os.path.join( - factory.getRandomString(), factory.getRandomString()) + factory.make_string(), factory.make_string()) rel_location_base = os.path.join( os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'static') @@ -59,15 
+59,15 @@ def test_get_combo_view_returns_callable(self): rel_location = os.path.join( - factory.getRandomString(), factory.getRandomString()) + factory.make_string(), factory.make_string()) view = get_combo_view(rel_location) self.assertIsInstance(view, Callable) def test_get_combo_view_loads_from_disk(self): - test_file_contents = factory.getRandomString() + test_file_contents = factory.make_string() # Create a valid file with a proper extension (the combo loader only # serves JS or CSS files) - test_file_name = "%s.js" % factory.getRandomString() + test_file_name = "%s.js" % factory.make_string() test_file = self.make_file( name=test_file_name, contents=test_file_contents) directory = os.path.dirname(test_file) @@ -88,10 +88,10 @@ # redirected. # Create a test file with an unknown extension. test_file_name = "%s.%s" % ( - factory.getRandomString(), factory.getRandomString()) - redirect_root = factory.getRandomString() + factory.make_string(), factory.make_string()) + redirect_root = factory.make_string() view = get_combo_view( - factory.getRandomString(), default_redirect=redirect_root) + factory.make_string(), default_redirect=redirect_root) rf = RequestFactory() request = rf.get("/test/?%s" % test_file_name) response = view(request) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_general.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_general.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_general.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_general.py 2015-07-10 01:27:14.000000000 +0000 @@ -207,7 +207,7 @@ def test_delete_deletes_object(self): obj = FakeDeletableModel() # HttpResponseRedirect does not allow next_url to be None. 
- view = FakeDeleteView(obj, next_url=factory.getRandomString()) + view = FakeDeleteView(obj, next_url=factory.make_string()) view.delete() self.assertTrue(obj.deleted) self.assertEqual([view.compose_feedback_deleted(obj)], view.notices) @@ -215,7 +215,7 @@ def test_delete_is_gentle_with_missing_objects(self): # Deleting a nonexistent object is basically treated as successful. # HttpResponseRedirect does not allow next_url to be None. - view = FakeDeleteView(next_url=factory.getRandomString()) + view = FakeDeleteView(next_url=factory.make_string()) response = view.delete() self.assertEqual(httplib.FOUND, response.status_code) self.assertEqual([view.compose_feedback_nonexistent()], view.notices) @@ -227,7 +227,7 @@ def test_get_asks_for_confirmation_and_does_nothing_yet(self): obj = FakeDeletableModel() - next_url = factory.getRandomString() + next_url = factory.make_string() request = RequestFactory().get('/foo') view = FakeDeleteView(obj, request=request, next_url=next_url) response = view.get(request) @@ -237,7 +237,7 @@ self.assertEqual([], view.notices) def test_get_skips_confirmation_for_missing_objects(self): - next_url = factory.getRandomString() + next_url = factory.make_string() request = RequestFactory().get('/foo') view = FakeDeleteView(next_url=next_url, request=request) response = view.get(request) @@ -245,7 +245,7 @@ self.assertEqual([view.compose_feedback_nonexistent()], view.notices) def test_compose_feedback_nonexistent_names_class(self): - class_name = factory.getRandomString() + class_name = factory.make_string() self.patch(FakeDeletableModel.Meta, 'verbose_name', class_name) view = FakeDeleteView() self.assertEqual( @@ -253,7 +253,7 @@ view.compose_feedback_nonexistent()) def test_compose_feedback_deleted_uses_name_object(self): - object_name = factory.getRandomString() + object_name = factory.make_string() view = FakeDeleteView(FakeDeletableModel()) view.name_object = lambda _obj: object_name self.assertEqual( @@ -379,7 +379,7 @@ def post(self, 
request, *args, **kwargs): raise ExternalComponentException() self.patch(NodeEdit, 'post', post) - node = factory.make_node(owner=self.logged_in_user) + node = factory.make_Node(owner=self.logged_in_user) node_edit_link = reverse('node-edit', args=[node.system_id]) response = self.client.post(node_edit_link, {}) self.assertEqual(node_edit_link, extract_redirect(response)) @@ -388,13 +388,13 @@ # When a ExternalComponentException is raised in a POST request, a # message is published with the error message. self.client_log_in() - error_message = factory.getRandomString() + error_message = factory.make_string() # Patch NodeEdit to error on post. def post(self, request, *args, **kwargs): raise ExternalComponentException(error_message) self.patch(NodeEdit, 'post', post) - node = factory.make_node(owner=self.logged_in_user) + node = factory.make_Node(owner=self.logged_in_user) node_edit_link = reverse('node-edit', args=[node.system_id]) self.client.post(node_edit_link, {}) # Manually perform the redirect: i.e. get the same page. @@ -414,12 +414,11 @@ ] errors = [] for fault in fault_codes: - # Create component with getRandomString to be sure - # to display all the errors. + # Create component with make_string to be sure to display all + # the errors. component = factory.make_name('component') error_message = factory.make_name('error') - error = Fault(fault, error_message) - errors.append(error) + errors.append(Fault(fault, error_message)) register_persistent_error(component, error_message) links = [ reverse('index'), diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_images.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_images.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_images.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_images.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,920 @@ +# Copyright 2012-2014 Canonical Ltd. 
This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Test maasserver images views.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import httplib +import json +import random + +from django.core.urlresolvers import reverse +from lxml.html import fromstring +from maasserver.enum import ( + BOOT_RESOURCE_TYPE, + NODE_STATUS, + ) +from maasserver.models import ( + BootResource, + BootSourceCache, + BootSourceSelection, + Config, + ) +from maasserver.testing import extract_redirect +from maasserver.testing.factory import factory +from maasserver.testing.orm import ( + get_one, + reload_object, + ) +from maasserver.testing.testcase import MAASServerTestCase +from maasserver.views import images as images_view +from maasserver.views.images import format_size +from maastesting.matchers import ( + MockCalledOnceWith, + MockCalledWith, + ) +from requests import ConnectionError +from testtools.matchers import ( + ContainsAll, + HasLength, + ) + + +class UbuntuImagesTest(MAASServerTestCase): + + def patch_get_os_info_from_boot_sources( + self, sources, releases=None, arches=None): + if releases is None: + releases = [factory.make_name('release') for _ in range(3)] + if arches is None: + arches = [factory.make_name('arch') for _ in range(3)] + mock_get_os_info = self.patch( + images_view, 'get_os_info_from_boot_sources') + mock_get_os_info.return_value = (sources, releases, arches) + return mock_get_os_info + + def test_shows_connection_error(self): + self.client_log_in(as_admin=True) + mock_get_os_info = self.patch( + images_view, 'get_os_info_from_boot_sources') + mock_get_os_info.side_effect = ConnectionError() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + warnings = doc.cssselect('div#connection-error') + self.assertEqual(1, len(warnings)) + + def 
test_shows_no_ubuntu_sources(self): + self.client_log_in(as_admin=True) + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + warnings = doc.cssselect('div#no-ubuntu-sources') + self.assertEqual(1, len(warnings)) + + def test_shows_too_many_ubuntu_sources(self): + self.client_log_in(as_admin=True) + sources = [factory.make_BootSource() for _ in range(2)] + self.patch_get_os_info_from_boot_sources(sources) + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + warnings = doc.cssselect('div#too-many-ubuntu-sources') + self.assertEqual(1, len(warnings)) + + def test_shows_release_options(self): + self.client_log_in(as_admin=True) + sources = [factory.make_BootSource()] + releases = [factory.make_name('release') for _ in range(3)] + self.patch_get_os_info_from_boot_sources(sources, releases=releases) + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + releases_content = doc.cssselect( + 'ul#ubuntu-releases')[0].text_content() + self.assertThat(releases_content, ContainsAll(releases)) + + def test_shows_architecture_options(self): + self.client_log_in(as_admin=True) + sources = [factory.make_BootSource()] + arches = [factory.make_name('arch') for _ in range(3)] + self.patch_get_os_info_from_boot_sources(sources, arches=arches) + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + arches_content = doc.cssselect( + 'ul#ubuntu-arches')[0].text_content() + self.assertThat(arches_content, ContainsAll(arches)) + + def test_shows_missing_images_warning_if_not_ubuntu_boot_resources(self): + self.client_log_in() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + warnings = doc.cssselect('div#missing-ubuntu-images') + self.assertEqual(1, len(warnings)) + + def test_hides_import_button_if_not_admin(self): + self.client_log_in() + sources = [factory.make_BootSource()] + 
self.patch_get_os_info_from_boot_sources(sources) + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + import_button = doc.cssselect( + '#ubuntu-images')[0].cssselect('input[type="submit"]') + self.assertEqual(0, len(import_button)) + + def test_shows_import_button_if_admin(self): + self.client_log_in(as_admin=True) + sources = [factory.make_BootSource()] + self.patch_get_os_info_from_boot_sources(sources) + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + import_button = doc.cssselect( + '#ubuntu-images')[0].cssselect('input[type="submit"]') + self.assertEqual(1, len(import_button)) + + def test_post_returns_forbidden_if_not_admin(self): + self.client_log_in() + response = self.client.post( + reverse('images'), {'ubuntu_images': 1}) + self.assertEqual(httplib.FORBIDDEN, response.status_code) + + def test_import_calls_import_resources(self): + self.client_log_in(as_admin=True) + sources = [factory.make_BootSource()] + self.patch_get_os_info_from_boot_sources(sources) + mock_import = self.patch(images_view, 'import_resources') + response = self.client.post( + reverse('images'), {'ubuntu_images': 1}) + self.assertEqual(httplib.FOUND, response.status_code) + self.assertThat(mock_import, MockCalledOnceWith()) + + def test_import_sets_empty_selections(self): + self.client_log_in(as_admin=True) + source = factory.make_BootSource() + self.patch_get_os_info_from_boot_sources([source]) + self.patch(images_view, 'import_resources') + response = self.client.post( + reverse('images'), {'ubuntu_images': 1}) + self.assertEqual(httplib.FOUND, response.status_code) + + selections = BootSourceSelection.objects.filter(boot_source=source) + self.assertThat(selections, HasLength(1)) + self.assertEqual( + (selections[0].os, selections[0].release, + selections[0].arches, selections[0].subarches, + selections[0].labels), + ("ubuntu", "", [], ["*"], ["*"])) + + def test_import_sets_release_selections(self): + 
self.client_log_in(as_admin=True) + source = factory.make_BootSource() + releases = [factory.make_name('release') for _ in range(3)] + self.patch_get_os_info_from_boot_sources([source]) + self.patch(images_view, 'import_resources') + response = self.client.post( + reverse('images'), {'ubuntu_images': 1, 'release': releases}) + self.assertEqual(httplib.FOUND, response.status_code) + + selections = BootSourceSelection.objects.filter(boot_source=source) + self.assertThat(selections, HasLength(len(releases))) + self.assertItemsEqual( + releases, + [selection.release for selection in selections]) + + def test_import_sets_arches_on_selections(self): + self.client_log_in(as_admin=True) + source = factory.make_BootSource() + releases = [factory.make_name('release') for _ in range(3)] + arches = [factory.make_name('arches') for _ in range(3)] + self.patch_get_os_info_from_boot_sources([source]) + self.patch(images_view, 'import_resources') + response = self.client.post( + reverse('images'), + {'ubuntu_images': 1, 'release': releases, 'arch': arches}) + self.assertEqual(httplib.FOUND, response.status_code) + + selections = BootSourceSelection.objects.filter(boot_source=source) + self.assertThat(selections, HasLength(len(releases))) + self.assertItemsEqual( + [arches, arches, arches], + [selection.arches for selection in selections]) + + def test_import_removes_old_selections(self): + self.client_log_in(as_admin=True) + source = factory.make_BootSource() + release = factory.make_name('release') + delete_selection = BootSourceSelection.objects.create( + boot_source=source, os='ubuntu', + release=factory.make_name('release')) + keep_selection = BootSourceSelection.objects.create( + boot_source=source, os='ubuntu', release=release) + self.patch_get_os_info_from_boot_sources([source]) + self.patch(images_view, 'import_resources') + response = self.client.post( + reverse('images'), {'ubuntu_images': 1, 'release': [release]}) + self.assertEqual(httplib.FOUND, response.status_code) 
+ self.assertIsNone(reload_object(delete_selection)) + self.assertIsNotNone(reload_object(keep_selection)) + + +class OtherImagesTest(MAASServerTestCase): + + def make_other_resource(self, os=None, arch=None, subarch=None, + release=None): + if os is None: + os = factory.make_name('os') + if arch is None: + arch = factory.make_name('arch') + if subarch is None: + subarch = factory.make_name('subarch') + if release is None: + release = factory.make_name('release') + name = '%s/%s' % (os, release) + architecture = '%s/%s' % (arch, subarch) + resource = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, + name=name, architecture=architecture) + resource_set = factory.make_BootResourceSet(resource) + factory.make_boot_resource_file_with_content(resource_set) + return resource + + def test_hides_other_synced_images_section(self): + self.client_log_in() + BootSourceCache.objects.all().delete() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + section = doc.cssselect('div#other-sync-images') + self.assertEqual( + 0, len(section), "Didn't hide the other images section.") + + def test_shows_other_synced_images_section(self): + self.client_log_in(as_admin=True) + factory.make_BootSourceCache() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + section = doc.cssselect('div#other-sync-images') + self.assertEqual( + 1, len(section), "Didn't show the other images section.") + + def test_hides_image_from_boot_source_cache_without_admin(self): + self.client_log_in() + factory.make_BootSourceCache() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + rows = doc.cssselect('table#other-resources > tbody > tr') + self.assertEqual( + 0, len(rows), "Didn't hide unselected boot image from non-admin.") + + def test_shows_image_from_boot_source_cache_with_admin(self): + self.client_log_in(as_admin=True) + cache = factory.make_BootSourceCache() + response = 
self.client.get(reverse('images')) + doc = fromstring(response.content) + title = doc.cssselect( + 'table#other-resources > tbody > ' + 'tr > td')[1].text_content().strip() + self.assertEqual('%s/%s' % (cache.os, cache.release), title) + + def test_shows_checkbox_for_boot_source_cache(self): + self.client_log_in(as_admin=True) + factory.make_BootSourceCache() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + checkbox = doc.cssselect( + 'table#other-resources > tbody > tr > td > input') + self.assertEqual( + 1, len(checkbox), "Didn't show checkbox for boot image.") + + def test_shows_last_update_time_for_synced_resource(self): + self.client_log_in(as_admin=True) + cache = factory.make_BootSourceCache() + self.make_other_resource( + os=cache.os, arch=cache.arch, + subarch=cache.subarch, release=cache.release) + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + last_update = doc.cssselect( + 'table#other-resources > tbody > ' + 'tr > td')[5].text_content().strip() + self.assertNotEqual('not synced', last_update) + + def test_shows_number_of_nodes_for_synced_resource(self): + self.client_log_in(as_admin=True) + cache = factory.make_BootSourceCache() + resource = self.make_other_resource( + os=cache.os, arch=cache.arch, + subarch=cache.subarch, release=cache.release) + factory.make_Node( + status=NODE_STATUS.DEPLOYED, + osystem=cache.os, distro_series=cache.release, + architecture=resource.architecture) + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + number_of_nodes = doc.cssselect( + 'table#other-resources > tbody > ' + 'tr > td')[4].text_content().strip() + self.assertEqual( + 1, int(number_of_nodes), + "Incorrect number of deployed nodes for resource.") + + def test_shows_apply_button_if_admin(self): + self.client_log_in(as_admin=True) + factory.make_BootSourceCache() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + 
apply_button = doc.cssselect( + '#other-sync-images')[0].cssselect('input[type="submit"]') + self.assertEqual( + 1, len(apply_button), "Didn't show apply button for admin.") + + def test_hides_apply_button_if_import_running(self): + self.client_log_in(as_admin=True) + factory.make_BootSourceCache() + self.patch( + images_view, 'is_import_resources_running').return_value = True + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + apply_button = doc.cssselect( + '#other-sync-images')[0].cssselect('input[type="submit"]') + self.assertEqual( + 0, len(apply_button), + "Didn't hide apply button when import running.") + + def test_calls_get_os_release_title_for_other_resource(self): + self.client_log_in() + title = factory.make_name('title') + cache = factory.make_BootSourceCache() + resource = self.make_other_resource( + os=cache.os, arch=cache.arch, + subarch=cache.subarch, release=cache.release) + mock_get_title = self.patch(images_view, 'get_os_release_title') + mock_get_title.return_value = title + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + row_title = doc.cssselect( + 'table#other-resources > tbody > ' + 'tr > td')[1].text_content().strip() + self.assertEqual(title, row_title) + os, release = resource.name.split('/') + self.assertThat(mock_get_title, MockCalledWith(os, release)) + + def test_post_returns_forbidden_if_not_admin(self): + self.client_log_in() + response = self.client.post( + reverse('images'), {'other_images': 1}) + self.assertEqual(httplib.FORBIDDEN, response.status_code) + + def test_post_clears_all_other_os_selections(self): + self.client_log_in(as_admin=True) + source = factory.make_BootSource() + ubuntu_selection = BootSourceSelection.objects.create( + boot_source=source, os='ubuntu') + other_selection = BootSourceSelection.objects.create( + boot_source=source, os=factory.make_name('os')) + self.patch(images_view, 'import_resources') + response = self.client.post( + 
reverse('images'), {'other_images': 1, 'image': []}) + self.assertEqual(httplib.FOUND, response.status_code) + self.assertIsNotNone(reload_object(ubuntu_selection)) + self.assertIsNone(reload_object(other_selection)) + + def test_post_creates_selection_with_multiple_arches(self): + self.client_log_in(as_admin=True) + source = factory.make_BootSource() + os = factory.make_name('os') + release = factory.make_name('release') + arches = [factory.make_name('arch') for _ in range(3)] + images = [] + for arch in arches: + factory.make_BootSourceCache( + boot_source=source, os=os, release=release, arch=arch) + images.append('%s/%s/subarch/%s' % (os, arch, release)) + self.patch(images_view, 'import_resources') + response = self.client.post( + reverse('images'), {'other_images': 1, 'image': images}) + self.assertEqual(httplib.FOUND, response.status_code) + + selection = get_one(BootSourceSelection.objects.filter( + boot_source=source, os=os, release=release)) + self.assertIsNotNone(selection) + self.assertItemsEqual(arches, selection.arches) + + def test_post_calls_import_resources(self): + self.client_log_in(as_admin=True) + mock_import = self.patch(images_view, 'import_resources') + response = self.client.post( + reverse('images'), {'other_images': 1, 'image': []}) + self.assertEqual(httplib.FOUND, response.status_code) + self.assertThat(mock_import, MockCalledOnceWith()) + + +class GeneratedImagesTest(MAASServerTestCase): + + def make_generated_resource(self, os=None, arch=None, subarch=None, + release=None): + if os is None: + os = factory.make_name('os') + if arch is None: + arch = factory.make_name('arch') + if subarch is None: + subarch = factory.make_name('subarch') + if release is None: + release = factory.make_name('release') + name = '%s/%s' % (os, release) + architecture = '%s/%s' % (arch, subarch) + resource = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.GENERATED, + name=name, architecture=architecture) + resource_set = 
factory.make_BootResourceSet(resource) + factory.make_boot_resource_file_with_content(resource_set) + return resource + + def test_hides_generated_images_section(self): + self.client_log_in() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + section = doc.cssselect('div#generated-images') + self.assertEqual( + 0, len(section), "Didn't hide the generated images section.") + + def test_shows_generated_images_section(self): + self.client_log_in() + self.make_generated_resource() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + section = doc.cssselect('div#generated-images') + self.assertEqual( + 1, len(section), "Didn't show the generated images section.") + + def test_shows_generated_resources(self): + self.client_log_in() + resources = [self.make_generated_resource() for _ in range(3)] + names = [resource.name for resource in resources] + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + table_content = doc.cssselect( + 'table#generated-resources')[0].text_content() + self.assertThat(table_content, ContainsAll(names)) + + def test_shows_delete_button_for_generated_resource(self): + self.client_log_in(as_admin=True) + self.make_generated_resource() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + delete_btn = doc.cssselect( + 'table#generated-resources > tbody > tr > td > ' + 'a[title="Delete image"]') + self.assertEqual( + 1, len(delete_btn), + "Didn't show delete button for generated image.") + + def test_hides_delete_button_for_generated_resource_when_not_admin(self): + self.client_log_in() + self.make_generated_resource() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + delete_btn = doc.cssselect( + 'table#generated-resources > tbody > tr > td > ' + 'a[title="Delete image"]') + self.assertEqual( + 0, len(delete_btn), + "Didn't hide delete button for generated image when not 
admin.") + + def test_calls_get_os_release_title_for_generated_resource(self): + self.client_log_in() + title = factory.make_name('title') + resource = self.make_generated_resource() + mock_get_title = self.patch(images_view, 'get_os_release_title') + mock_get_title.return_value = title + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + row_title = doc.cssselect( + 'table#generated-resources > tbody > ' + 'tr > td')[1].text_content().strip() + self.assertEqual(title, row_title) + os, release = resource.name.split('/') + self.assertThat(mock_get_title, MockCalledOnceWith(os, release)) + + +class UploadedImagesTest(MAASServerTestCase): + + def make_uploaded_resource(self, name=None): + if name is None: + name = factory.make_name('name') + arch = factory.make_name('arch') + subarch = factory.make_name('subarch') + architecture = '%s/%s' % (arch, subarch) + resource = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.UPLOADED, + name=name, architecture=architecture) + resource_set = factory.make_BootResourceSet(resource) + factory.make_boot_resource_file_with_content(resource_set) + return resource + + def test_shows_no_custom_images_message(self): + self.client_log_in() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + warnings = doc.cssselect('div#no-custom-images') + self.assertEqual(1, len(warnings)) + + def test_shows_uploaded_resources(self): + self.client_log_in() + names = [factory.make_name('name') for _ in range(3)] + [self.make_uploaded_resource(name) for name in names] + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + table_content = doc.cssselect( + 'table#uploaded-resources')[0].text_content() + self.assertThat(table_content, ContainsAll(names)) + + def test_shows_uploaded_resources_name_if_title_blank(self): + self.client_log_in() + name = factory.make_name('name') + resource = self.make_uploaded_resource(name) + resource.extra['title'] = '' 
+ resource.save() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + name_col = doc.cssselect( + 'table#uploaded-resources > tbody > tr > td')[1].text_content() + self.assertEqual(name, name_col.strip()) + + def test_shows_delete_button_for_uploaded_resource(self): + self.client_log_in(as_admin=True) + self.make_uploaded_resource() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + delete_btn = doc.cssselect( + 'table#uploaded-resources > tbody > tr > td > ' + 'a[title="Delete image"]') + self.assertEqual(1, len(delete_btn)) + + def test_hides_delete_button_for_uploaded_resource_when_not_admin(self): + self.client_log_in() + self.make_uploaded_resource() + response = self.client.get(reverse('images')) + doc = fromstring(response.content) + delete_btn = doc.cssselect( + 'table#uploaded-resources > tbody > tr > td > ' + 'a[title="Delete image"]') + self.assertEqual(0, len(delete_btn)) + + +class TestImageAjax(MAASServerTestCase): + + def get_images_ajax(self): + return self.client.get( + reverse('images'), HTTP_X_REQUESTED_WITH='XMLHttpRequest') + + def test__returns_json(self): + self.client_log_in() + response = self.get_images_ajax() + self.assertEqual('application/json', response['Content-Type']) + + def test__returns_region_import_running_True(self): + self.client_log_in() + self.patch( + images_view, 'is_import_resources_running').return_value = True + response = self.get_images_ajax() + json_obj = json.loads(response.content) + self.assertTrue(json_obj['region_import_running']) + + def test__returns_region_import_running_False(self): + self.client_log_in() + self.patch( + images_view, 'is_import_resources_running').return_value = False + response = self.get_images_ajax() + json_obj = json.loads(response.content) + self.assertFalse(json_obj['region_import_running']) + + def test__returns_cluster_import_running_True(self): + self.client_log_in() + self.patch( + images_view, 
'is_import_boot_images_running').return_value = True + response = self.get_images_ajax() + json_obj = json.loads(response.content) + self.assertTrue(json_obj['cluster_import_running']) + + def test__returns_cluster_import_running_False(self): + self.client_log_in() + self.patch( + images_view, 'is_import_boot_images_running').return_value = False + response = self.get_images_ajax() + json_obj = json.loads(response.content) + self.assertFalse(json_obj['cluster_import_running']) + + def test_returns_resources(self): + self.client_log_in() + resources = [factory.make_usable_boot_resource() for _ in range(3)] + resource_ids = [resource.id for resource in resources] + response = self.get_images_ajax() + json_obj = json.loads(response.content) + json_ids = [ + json_resource['id'] + for json_resource in json_obj['resources'] + ] + self.assertItemsEqual(resource_ids, json_ids) + + def test_returns_resource_attributes(self): + self.client_log_in() + factory.make_usable_boot_resource() + response = self.get_images_ajax() + json_obj = json.loads(response.content) + json_resource = json_obj['resources'][0] + self.assertThat( + json_resource, + ContainsAll([ + 'id', 'rtype', 'name', 'title', 'arch', 'size', + 'complete', 'status', 'downloading', + 'numberOfNodes', 'lastUpdate'])) + + def test_returns_ubuntu_release_version_name(self): + self.client_log_in() + # Use trusty as known to map to "14.04 LTS" + version = '14.04 LTS' + name = 'ubuntu/trusty' + factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name) + response = self.get_images_ajax() + json_obj = json.loads(response.content) + json_resource = json_obj['resources'][0] + self.assertEqual(version, json_resource['title']) + + def test_shows_number_of_nodes_deployed_for_resource(self): + self.client_log_in() + resource = factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.SYNCED) + os_name, series = resource.name.split('/') + number_of_nodes = random.randint(1, 4) + for _ in 
range(number_of_nodes): + factory.make_Node( + status=NODE_STATUS.DEPLOYED, + osystem=os_name, distro_series=series, + architecture=resource.architecture) + response = self.get_images_ajax() + json_obj = json.loads(response.content) + json_resource = json_obj['resources'][0] + self.assertEqual(number_of_nodes, json_resource['numberOfNodes']) + + def test_shows_number_of_nodes_deployed_for_resource_with_defaults(self): + self.client_log_in() + resource = factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.SYNCED) + os_name, series = resource.name.split('/') + Config.objects.set_config('default_osystem', os_name) + Config.objects.set_config('default_distro_series', series) + number_of_nodes = random.randint(1, 4) + for _ in range(number_of_nodes): + factory.make_Node( + status=NODE_STATUS.DEPLOYED, + architecture=resource.architecture) + response = self.get_images_ajax() + json_obj = json.loads(response.content) + json_resource = json_obj['resources'][0] + self.assertEqual(number_of_nodes, json_resource['numberOfNodes']) + + def test_shows_number_of_nodes_deployed_for_ubuntu_subarch_resource(self): + self.client_log_in() + resource = factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.SYNCED) + arch, subarch = resource.split_arch() + extra_subarch = factory.make_name('subarch') + resource.extra['subarches'] = ','.join([subarch, extra_subarch]) + resource.save() + + os_name, series = resource.name.split('/') + node_architecture = '%s/%s' % (arch, extra_subarch) + number_of_nodes = random.randint(1, 4) + for _ in range(number_of_nodes): + factory.make_Node( + status=NODE_STATUS.DEPLOYED, + osystem=os_name, distro_series=series, + architecture=node_architecture) + response = self.get_images_ajax() + json_obj = json.loads(response.content) + json_resource = json_obj['resources'][0] + self.assertEqual(number_of_nodes, json_resource['numberOfNodes']) + + def test_combines_subarch_resources_into_one_resource(self): + self.client_log_in() + name = 
'ubuntu/%s' % factory.make_name('series') + arch = factory.make_name('arch') + subarches = [factory.make_name('subarch') for _ in range(3)] + for subarch in subarches: + factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, + name=name, architecture='%s/%s' % (arch, subarch)) + response = self.get_images_ajax() + json_obj = json.loads(response.content) + self.assertEqual( + 1, len(json_obj['resources']), + 'More than one resource was returned.') + + def test_combined_subarch_resource_calculates_unique_size(self): + self.client_log_in() + name = 'ubuntu/%s' % factory.make_name('series') + arch = factory.make_name('arch') + subarches = [factory.make_name('subarch') for _ in range(3)] + largefile = factory.make_LargeFile() + for subarch in subarches: + resource = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, + name=name, architecture='%s/%s' % (arch, subarch)) + resource_set = factory.make_BootResourceSet(resource) + factory.make_BootResourceFile(resource_set, largefile) + response = self.get_images_ajax() + json_obj = json.loads(response.content) + json_resource = json_obj['resources'][0] + self.assertEqual( + format_size(largefile.total_size), json_resource['size']) + + def test_combined_subarch_resource_calculates_num_of_nodes_deployed(self): + self.client_log_in() + osystem = 'ubuntu' + series = factory.make_name('series') + name = '%s/%s' % (osystem, series) + arch = factory.make_name('arch') + subarches = [factory.make_name('subarch') for _ in range(3)] + for subarch in subarches: + factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, + name=name, architecture='%s/%s' % (arch, subarch)) + + number_of_nodes = random.randint(1, 4) + for _ in range(number_of_nodes): + subarch = random.choice(subarches) + node_architecture = '%s/%s' % (arch, subarch) + factory.make_Node( + status=NODE_STATUS.DEPLOYED, + osystem=osystem, distro_series=series, + architecture=node_architecture) + + response = self.get_images_ajax() + 
json_obj = json.loads(response.content) + json_resource = json_obj['resources'][0] + self.assertEqual(number_of_nodes, json_resource['numberOfNodes']) + + def test_combined_subarch_resource_calculates_complete_True(self): + self.client_log_in() + name = 'ubuntu/%s' % factory.make_name('series') + arch = factory.make_name('arch') + subarches = [factory.make_name('subarch') for _ in range(3)] + resources = [ + factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, + name=name, architecture='%s/%s' % (arch, subarch)) + for subarch in subarches + ] + self.patch( + BootResource.objects, + 'get_resources_matching_boot_images').return_value = resources + response = self.get_images_ajax() + json_obj = json.loads(response.content) + json_resource = json_obj['resources'][0] + self.assertTrue(json_resource['complete']) + + def test_combined_subarch_resource_calculates_complete_False(self): + self.client_log_in() + name = 'ubuntu/%s' % factory.make_name('series') + arch = factory.make_name('arch') + subarches = [factory.make_name('subarch') for _ in range(3)] + incomplete_subarch = subarches.pop() + factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, + name=name, architecture='%s/%s' % (arch, incomplete_subarch)) + for subarch in subarches: + factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, + name=name, architecture='%s/%s' % (arch, subarch)) + response = self.get_images_ajax() + json_obj = json.loads(response.content) + json_resource = json_obj['resources'][0] + self.assertFalse(json_resource['complete']) + + def test_combined_subarch_resource_calculates_progress(self): + self.client_log_in() + name = 'ubuntu/%s' % factory.make_name('series') + arch = factory.make_name('arch') + subarches = [factory.make_name('subarch') for _ in range(3)] + largefile = factory.make_LargeFile() + largefile.total_size = largefile.total_size * 2 + largefile.save() + for subarch in subarches: + resource = factory.make_BootResource( + 
rtype=BOOT_RESOURCE_TYPE.SYNCED, + name=name, architecture='%s/%s' % (arch, subarch)) + resource_set = factory.make_BootResourceSet(resource) + factory.make_BootResourceFile(resource_set, largefile) + response = self.get_images_ajax() + json_obj = json.loads(response.content) + json_resource = json_obj['resources'][0] + self.assertEqual("Downloading 50%", json_resource['status']) + + def test_combined_subarch_resource_shows_queued_if_no_progress(self): + self.client_log_in() + name = 'ubuntu/%s' % factory.make_name('series') + arch = factory.make_name('arch') + subarches = [factory.make_name('subarch') for _ in range(3)] + largefile = factory.make_LargeFile(content="") + for subarch in subarches: + resource = factory.make_BootResource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, + name=name, architecture='%s/%s' % (arch, subarch)) + resource_set = factory.make_BootResourceSet(resource) + factory.make_BootResourceFile(resource_set, largefile) + response = self.get_images_ajax() + json_obj = json.loads(response.content) + json_resource = json_obj['resources'][0] + self.assertEqual("Queued for download", json_resource['status']) + + def test_combined_subarch_resource_shows_complete_status(self): + self.client_log_in() + name = 'ubuntu/%s' % factory.make_name('series') + arch = factory.make_name('arch') + subarches = [factory.make_name('subarch') for _ in range(3)] + resources = [ + factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, + name=name, architecture='%s/%s' % (arch, subarch)) + for subarch in subarches + ] + self.patch( + BootResource.objects, + 'get_resources_matching_boot_images').return_value = resources + response = self.get_images_ajax() + json_obj = json.loads(response.content) + json_resource = json_obj['resources'][0] + self.assertEqual("Complete", json_resource['status']) + + def test_combined_subarch_resource_shows_waiting_for_cluster_to_sync(self): + self.client_log_in() + name = 'ubuntu/%s' % factory.make_name('series') + arch = 
factory.make_name('arch') + subarches = [factory.make_name('subarch') for _ in range(3)] + for subarch in subarches: + factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, + name=name, architecture='%s/%s' % (arch, subarch)) + self.patch( + BootResource.objects, + 'get_resources_matching_boot_images').return_value = [] + response = self.get_images_ajax() + json_obj = json.loads(response.content) + json_resource = json_obj['resources'][0] + self.assertEqual( + "Waiting for clusters to sync", json_resource['status']) + + def test_combined_subarch_resource_shows_clusters_syncing(self): + self.client_log_in() + name = 'ubuntu/%s' % factory.make_name('series') + arch = factory.make_name('arch') + subarches = [factory.make_name('subarch') for _ in range(3)] + for subarch in subarches: + factory.make_usable_boot_resource( + rtype=BOOT_RESOURCE_TYPE.SYNCED, + name=name, architecture='%s/%s' % (arch, subarch)) + self.patch( + BootResource.objects, + 'get_resources_matching_boot_images').return_value = [] + self.patch( + images_view, 'is_import_boot_images_running').return_value = True + response = self.get_images_ajax() + json_obj = json.loads(response.content) + json_resource = json_obj['resources'][0] + self.assertEqual( + "Syncing to clusters", json_resource['status']) + + +class TestImageDelete(MAASServerTestCase): + + def test_non_admin_cannot_delete(self): + self.client_log_in() + resource = factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.UPLOADED) + response = self.client.post( + reverse('image-delete', args=[resource.id])) + self.assertEqual(httplib.FORBIDDEN, response.status_code) + self.assertIsNotNone(reload_object(resource)) + + def test_deletes_resource(self): + self.client_log_in(as_admin=True) + resource = factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.UPLOADED) + response = self.client.post( + reverse('image-delete', args=[resource.id]), + {'post': 'yes'}) + self.assertEqual(httplib.FOUND, response.status_code) + 
self.assertIsNone(reload_object(resource)) + + def test_redirects_to_images(self): + self.client_log_in(as_admin=True) + resource = factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.UPLOADED) + response = self.client.post( + reverse('image-delete', args=[resource.id]), + {'post': 'yes'}) + self.assertEqual(reverse('images'), extract_redirect(response)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_longpoll.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_longpoll.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_longpoll.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_longpoll.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,63 +0,0 @@ -# Copyright 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Longpoll-related views tests.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from unittest import skip - -from django.conf import settings -from maasserver import messages -from maasserver.exceptions import NoRabbit -from maasserver.testing.factory import factory -from maasserver.testing.rabbit import uses_rabbit_fixture -from maasserver.testing.testcase import MAASServerTestCase -from maasserver.views.nodes import get_longpoll_context - - -class TestGetLongpollContext(MAASServerTestCase): - - def test_get_longpoll_context_empty_if_rabbitmq_publish_is_none(self): - self.patch(settings, 'RABBITMQ_PUBLISH', None) - messages.MESSAGING.reset() - self.assertEqual({}, get_longpoll_context()) - - def test_get_longpoll_context_returns_empty_if_rabbit_not_running(self): - - class FakeMessaging: - """Fake :class:`RabbitMessaging`: fail with `NoRabbit`.""" - - def getQueue(self, *args, **kwargs): - raise NoRabbit("Pretending not to have a rabbit.") - - self.patch(messages.MESSAGING, '_cached_messaging', FakeMessaging()) - 
self.assertEqual({}, get_longpoll_context()) - - def test_get_longpoll_context_empty_if_longpoll_url_is_None(self): - self.patch(settings, 'LONGPOLL_PATH', None) - messages.MESSAGING.reset() - self.assertEqual({}, get_longpoll_context()) - - @skip( - "XXX: GavinPanella 2012-09-27 bug=1057250: Causes test " - "failures in unrelated parts of the test suite.") - @uses_rabbit_fixture - def test_get_longpoll_context(self): - longpoll = factory.getRandomString() - self.patch(settings, 'LONGPOLL_PATH', longpoll) - self.patch(settings, 'RABBITMQ_PUBLISH', True) - messages.MESSAGING.reset() - context = get_longpoll_context() - self.assertItemsEqual( - ['LONGPOLL_PATH', 'longpoll_queue'], context) - self.assertEqual(longpoll, context['LONGPOLL_PATH']) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_networks.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_networks.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_networks.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_networks.py 2015-07-10 01:27:14.000000000 +0000 @@ -25,9 +25,9 @@ from maasserver.testing import ( extract_redirect, get_content_links, - reload_object, ) from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils.orm import get_one from maasserver.views.networks import ( @@ -57,10 +57,11 @@ # Network listing displays the network name, description, # network information and VLAN tag. self.client_log_in() - factory.make_networks(3) + factory.make_Networks(3) networks = Network.objects.all() # Attach some NICs to some of the networks. 
- [factory.make_mac_address(networks=networks[1:3]) for _ in range(12)] + [factory.make_MACAddress_with_Node(networks=networks[1:3]) + for _ in range(12)] response = self.client.get(reverse('network-list')) details_list = [ [ @@ -68,6 +69,9 @@ network.description[:20], '%s' % network.get_network(), '%s' % network.vlan_tag if network.vlan_tag else '', + '%s' % ( + network.default_gateway if network.default_gateway + else ''), '%d' % network.get_connected_nodes().count(), ] for network in networks] @@ -77,7 +81,7 @@ def test_network_list_displays_sorted_list_of_networks(self): # Networks are alphabetically sorted on the network list page. self.client_log_in() - factory.make_networks(3, sortable_name=True) + factory.make_Networks(3, sortable_name=True) networks = Network.objects.all() sorted_networks = sorted(networks, key=attrgetter('name')) response = self.client.get(reverse('network-list')) @@ -91,7 +95,7 @@ def test_network_list_displays_links_to_network_node(self): self.client_log_in() - factory.make_networks(3, sortable_name=True) + factory.make_Networks(3, sortable_name=True) networks = Network.objects.all() sorted_networks = sorted(networks, key=attrgetter('name')) response = self.client.get(reverse('network-list')) @@ -107,7 +111,7 @@ def test_network_listing_is_paginated(self): self.patch(NetworkListView, "paginate_by", 3) self.client_log_in(as_admin=True) - factory.make_networks(4) + factory.make_Networks(4) response = self.client.get(reverse('network-list')) self.assertEqual(httplib.OK, response.status_code) doc = fromstring(response.content) @@ -120,7 +124,7 @@ def test_network_list_does_not_contain_edit_and_delete_links(self): self.client_log_in() - networks = factory.make_networks(3) + networks = factory.make_Networks(3) response = self.client.get(reverse('network-list')) network_edit_links = [ reverse('network-edit', args=[network.name]) @@ -149,7 +153,7 @@ def test_network_list_contains_edit_links(self): self.client_log_in(as_admin=True) - networks = 
factory.make_networks(3) + networks = factory.make_Networks(3) network_edit_links = [ reverse('network-edit', args=[network.name]) for network in networks @@ -189,13 +193,15 @@ def test_adds_network(self): self.client_log_in(as_admin=True) - network = factory.getRandomNetwork() + network = factory.make_ipv4_network() definition = { 'name': factory.make_name('network'), - 'description': factory.getRandomString(), + 'description': factory.make_string(), 'ip': "%s" % network.cidr.ip, 'netmask': "%s" % network.netmask, 'vlan_tag': factory.make_vlan_tag(), + 'default_gateway': factory.make_ipv4_address(), + 'dns_servers': factory.make_ipv4_address(), } response = self.client.post(reverse('network-add'), definition) self.assertEqual(httplib.FOUND, response.status_code) @@ -217,7 +223,7 @@ # The Network detail view displays the network name and the network # description. self.client_log_in() - network = factory.make_network() + network = factory.make_Network() response = self.client.get( reverse('network-view', args=[network.name])) self.assertThat( @@ -232,8 +238,9 @@ def test_network_detail_displays_node_count(self): self.client_log_in() - network = factory.make_network() - [factory.make_mac_address(networks=[network]) for i in range(5)] + network = factory.make_Network() + [factory.make_MACAddress_with_Node(networks=[network]) + for _ in range(5)] response = self.client.get( reverse('network-view', args=[network.name])) document = fromstring(response.content) @@ -243,7 +250,7 @@ def test_network_detail_escapes_description(self): nasty_description = 'AC&D' self.client_log_in() - network = factory.make_network(description=nasty_description) + network = factory.make_Network(description=nasty_description) response = self.client.get( reverse('network-view', args=[network.name])) doc = fromstring(response.content) @@ -256,7 +263,7 @@ def test_network_detail_does_not_contain_edit_link(self): self.client_log_in() - network = factory.make_network() + network = 
factory.make_Network() response = self.client.get( reverse('network-view', args=[network.name])) network_edit_link = reverse('network-edit', args=[network.name]) @@ -264,7 +271,7 @@ def test_network_detail_does_not_contain_delete_link(self): self.client_log_in() - network = factory.make_network() + network = factory.make_Network() response = self.client.get( reverse('network-view', args=[network.name])) network_delete_link = reverse('network-del', args=[network.name]) @@ -275,7 +282,7 @@ def test_network_detail_contains_edit_link(self): self.client_log_in(as_admin=True) - network = factory.make_network() + network = factory.make_Network() response = self.client.get( reverse('network-view', args=[network.name])) network_edit_link = reverse('network-edit', args=[network.name]) @@ -283,7 +290,7 @@ def test_network_detail_contains_delete_link(self): self.client_log_in(as_admin=True) - network = factory.make_network() + network = factory.make_Network() response = self.client.get( reverse('network-view', args=[network.name])) network_delete_link = reverse('network-del', args=[network.name]) @@ -294,7 +301,7 @@ def test_cannot_access_network_edit(self): self.client_log_in() - network = factory.make_network() + network = factory.make_Network() response = self.client.post( reverse('network-edit', args=[network.name])) self.assertEqual(reverse('login'), extract_redirect(response)) @@ -304,18 +311,22 @@ def test_network_edit(self): self.client_log_in(as_admin=True) - network = factory.make_network() + network = factory.make_Network() new_name = factory.make_name('name') new_description = factory.make_name('description') new_macs = [ - factory.make_mac_address() + factory.make_MACAddress_with_Node() for _ in range(3)] + new_gateway = factory.make_ipv4_address() + new_dns_servers = factory.make_ipv4_address() response = self.client.post( reverse('network-edit', args=[network.name]), data={ 'name': new_name, 'description': new_description, + 'default_gateway': new_gateway, 
'mac_addresses': new_macs, + 'dns_servers': new_dns_servers, }) self.assertEqual( reverse('network-list'), extract_redirect(response), @@ -332,7 +343,7 @@ def test_cannot_delete(self): self.client_log_in() - network = factory.make_network() + network = factory.make_Network() response = self.client.post( reverse('network-del', args=[network.name])) self.assertEqual(reverse('login'), extract_redirect(response)) @@ -343,7 +354,7 @@ def test_deletes_network(self): self.client_log_in(as_admin=True) - network = factory.make_network() + network = factory.make_Network() response = self.client.post( reverse('network-del', args=[network.name]), {'post': 'yes'}) @@ -352,7 +363,7 @@ def test_redirects_to_listing(self): self.client_log_in(as_admin=True) - network = factory.make_network() + network = factory.make_Network() response = self.client.post( reverse('network-del', args=[network.name]), {'post': 'yes'}) @@ -360,8 +371,8 @@ def test_disconnects_macs(self): self.client_log_in(as_admin=True) - network = factory.make_network() - mac = factory.make_mac_address(networks=[network]) + network = factory.make_Network() + mac = factory.make_MACAddress_with_Node(networks=[network]) response = self.client.post( reverse('network-del', args=[network.name]), {'post': 'yes'}) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_nodecommissionresults.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_nodecommissionresults.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_nodecommissionresults.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_nodecommissionresults.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,284 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for the `NodeCommissionResult` views.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import httplib -from operator import attrgetter -from random import randint - -from django.core.urlresolvers import reverse -from django.http.request import QueryDict -from lxml import html -from maasserver.testing import extract_redirect -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase -from maasserver.utils.orm import get_one -from maasserver.views.nodecommissionresult import NodeCommissionResultListView -from mock import Mock -from testtools.matchers import HasLength - - -def normalise_whitespace(text): - """Replace any whitespace sequence with just a single space.""" - return ' '.join(text.split()) - - -class TestNodeCommissionResultView(MAASServerTestCase): - - def request_page(self, result): - """Request and parse the page for the given `NodeCommissionResult`. - - :return: The page's main content as an `lxml.html.HtmlElement`. - """ - link = reverse('nodecommissionresult-view', args=[result.id]) - response = self.client.get(link) - self.assertEqual(httplib.OK, response.status_code, response.content) - doc = html.fromstring(response.content) - return get_one(doc.cssselect('#content')) - - def extract_field(self, doc, field_name, containing_tag='span'): - """Get the content text from one of the
  • fields on the page. - - This works on the basis that each of the fields has an `id` attribute - which is unique on the page, and contains exactly one tag of the type - given as `containing_tag`, which holds the field value. - """ - field = get_one(doc.cssselect('#' + field_name)) - value = get_one(field.cssselect(containing_tag)) - return value.text_content().strip() - - def test_requires_admin(self): - self.client_log_in(as_admin=False) - result = factory.make_node_commission_result() - response = self.client.get( - reverse('nodecommissionresult-view', args=[result.id])) - self.assertEqual(reverse('login'), extract_redirect(response)) - - def test_displays_result(self): - self.client_log_in(as_admin=True) - result = factory.make_node_commission_result( - data=factory.getRandomString().encode('ascii')) - doc = self.request_page(result) - - self.assertEqual(result.name, self.extract_field(doc, 'name')) - self.assertEqual( - result.node.hostname, - self.extract_field(doc, 'node')) - self.assertEqual( - "%d" % result.script_result, - self.extract_field(doc, 'script-result')) - self.assertEqual(result.data, self.extract_field(doc, 'output', 'pre')) - - def test_escapes_html_in_output(self): - self.client_log_in(as_admin=True) - # It's actually surprisingly hard to test for this, because lxml - # un-escapes on parsing, and is very tolerant of malformed input. - # Parsing an un-escaped AC, however, would produce text "AC" - # (because the looks like a tag). 
- result = factory.make_node_commission_result(data=b'AC') - doc = self.request_page(result) - self.assertEqual('AC', self.extract_field(doc, 'output', 'pre')) - - def test_escapes_binary_in_output(self): - self.client_log_in(as_admin=True) - result = factory.make_node_commission_result(data=b'A\xffB') - doc = self.request_page(result) - self.assertEqual('A\ufffdB', self.extract_field(doc, 'output', 'pre')) - - def test_hides_output_if_empty(self): - self.client_log_in(as_admin=True) - result = factory.make_node_commission_result(data=b'') - doc = self.request_page(result) - field = get_one(doc.cssselect('#output')) - self.assertEqual('', field.text_content().strip()) - - -class TestNodeCommissionResultListView(MAASServerTestCase): - - def make_query_string(self, nodes): - """Compose a URL query string to filter for the given nodes.""" - return '&'.join('node=%s' % node.system_id for node in nodes) - - def request_page(self, nodes=None): - """Request and parse the page for the given `NodeCommissionResult`. - - :param node: Optional list of `Node` for which results should be - displayed. If not given, all results are displayed. - :return: The page's main content as an `lxml.html.HtmlElement`. - """ - link = reverse('nodecommissionresult-list') - if nodes is not None: - link += '?' 
+ self.make_query_string(nodes) - response = self.client.get(link) - self.assertEqual(httplib.OK, response.status_code, response.content) - return get_one(html.fromstring(response.content).cssselect('#content')) - - def make_view(self, nodes=None): - if nodes is None: - query_string = '' - else: - query_string = self.make_query_string(nodes) - view = NodeCommissionResultListView() - view.request = Mock() - view.request.GET = QueryDict(query_string) - return view - - def test_requires_admin(self): - self.client_log_in(as_admin=False) - response = self.client.get(reverse('nodecommissionresult-list')) - self.assertEqual(reverse('login'), extract_redirect(response)) - - def test_lists_empty_page(self): - self.client_log_in(as_admin=True) - content = self.request_page() - self.assertIn( - "No matching commissioning results.", - content.text_content().strip()) - self.assertEqual([], content.cssselect('.result')) - - def test_lists_results(self): - self.client_log_in(as_admin=True) - result = factory.make_node_commission_result(script_result=0) - content = self.request_page() - result_row = get_one(content.cssselect('.result')) - fields = result_row.cssselect('td') - - [script_result, output_file, time, node] = fields - self.assertEqual('OK', script_result.text_content().strip()) - self.assertIn('%d' % result.created.year, time.text_content()) - self.assertEqual(result.node.hostname, node.text_content().strip()) - self.assertEqual( - reverse('node-view', args=[result.node.system_id]), - get_one(node.cssselect('a')).get('href')) - self.assertEqual(result.name, output_file.text_content().strip()) - - def test_shows_failure(self): - self.client_log_in(as_admin=True) - factory.make_node_commission_result(script_result=randint(1, 100)) - content = self.request_page() - result_row = get_one(content.cssselect('.result')) - fields = result_row.cssselect('td') - [script_result, _, _, _] = fields - self.assertEqual("FAILED", script_result.text_content().strip()) - 
self.assertNotEqual([], script_result.find_class('warning')) - - def test_links_to_result(self): - self.client_log_in(as_admin=True) - result = factory.make_node_commission_result( - script_result=randint(1, 100)) - content = self.request_page() - result_row = get_one(content.cssselect('.result')) - fields = result_row.cssselect('td') - [script_result, _, _, _] = fields - link = get_one(script_result.cssselect('a')) - self.assertEqual( - reverse('nodecommissionresult-view', args=[result.id]), - link.get('href')) - - def test_groups_by_node(self): - nodes = [factory.make_node() for _ in range(3)] - # Create two results per node, but interleave them so the results for - # any given node are unlikely to occur side by side by accident. - for _ in range(2): - for node in nodes: - factory.make_node_commission_result(node=node) - sorted_results = self.make_view().get_queryset() - self.assertEqual(sorted_results[0].node, sorted_results[1].node) - self.assertEqual(sorted_results[2].node, sorted_results[3].node) - self.assertEqual(sorted_results[4].node, sorted_results[5].node) - - def test_sorts_by_creation_time_for_same_node(self): - node = factory.make_node() - results = [ - factory.make_node_commission_result(node=node) - for _ in range(3) - ] - for result in results: - result.created -= factory.make_timedelta() - result.save() - - self.assertEqual( - sorted(results, key=attrgetter('created'), reverse=True), - list(self.make_view().get_queryset())) - - def test_sorts_by_name_for_same_node_and_creation_time(self): - node = factory.make_node() - results = { - factory.make_node_commission_result( - node=node, name=factory.make_name().lower()) - for _ in range(5) - } - self.assertEqual( - sorted(results, key=attrgetter('name')), - list(self.make_view().get_queryset())) - - def test_filters_by_node(self): - factory.make_node_commission_result() - node = factory.make_node() - node_results = { - factory.make_node_commission_result(node=node) for _ in range(3) - } - 
factory.make_node_commission_result() - - self.assertEqual( - node_results, - set(self.make_view(nodes=[node]).get_queryset())) - - def test_combines_node_filters(self): - # The nodes are passed as GET parameters, which means there is some - # subtlety to how they are passed to the application. Reading them - # naively would ignore all but the first node passed, so make sure we - # process all of them. - self.client_log_in(as_admin=True) - results = [factory.make_node_commission_result() for _ in range(3)] - matching_results = results[1:3] - content = self.request_page( - nodes=[result.node for result in matching_results]) - rows = content.cssselect('.result') - self.assertThat(rows, HasLength(len(matching_results))) - matching_names = set() - for row in rows: - [_, name, _, _] = row.cssselect('td') - matching_names.add(name.text_content().strip()) - self.assertEqual( - {result.name for result in matching_results}, - matching_names) - - def test_does_not_show_node_if_not_filtering_by_node(self): - self.client_log_in(as_admin=True) - doc = self.request_page() - header = get_one(doc.cssselect('#results_header')) - self.assertEqual( - "Commissioning results", - normalise_whitespace(header.text_content())) - - def test_shows_node_if_filtering_by_node(self): - self.client_log_in(as_admin=True) - node = factory.make_node() - doc = self.request_page(nodes=[node]) - header = get_one(doc.cssselect('#results_header')) - self.assertEqual( - "Commissioning results for %s" % node.hostname, - normalise_whitespace(header.text_content())) - - def test_shows_nodes_if_filtering_by_multiple_nodes(self): - self.client_log_in(as_admin=True) - names = [factory.make_name('node').lower() for _ in range(2)] - nodes = [factory.make_node(hostname=name) for name in names] - doc = self.request_page(nodes=nodes) - header = get_one(doc.cssselect('#results_header')) - self.assertEqual( - "Commissioning results for %s" % ', '.join(sorted(names)), - normalise_whitespace(header.text_content())) diff 
-Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_noderesult.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_noderesult.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_noderesult.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_noderesult.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,360 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for the `NodeResult` views.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import httplib +from operator import attrgetter +from random import randint + +from django.core.urlresolvers import reverse +from django.http.request import QueryDict +from lxml import html +from maasserver.testing import extract_redirect +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase +from maasserver.utils.orm import get_one +from maasserver.views.noderesult import NodeCommissionResultListView +from mock import Mock +from provisioningserver.utils.text import normalise_whitespace +from testtools.matchers import HasLength + + +def extract_field(doc, field_name, containing_tag='span'): + """Get the content text from one of the
  • fields on the page. + + This works on the basis that each of the fields has an `id` attribute + which is unique on the page, and contains exactly one tag of the type + given as `containing_tag`, which holds the field value. + """ + field = get_one(doc.cssselect('#' + field_name)) + value = get_one(field.cssselect(containing_tag)) + return value.text_content().strip() + + +class TestNodeInstallResultView(MAASServerTestCase): + + def request_page(self, result): + """Request and parse the page for the given `NodeResult`. + + :return: The page's main content as an `lxml.html.HtmlElement`. + """ + link = reverse('nodeinstallresult-view', args=[result.id]) + response = self.client.get(link) + self.assertEqual(httplib.OK, response.status_code, response.content) + doc = html.fromstring(response.content) + return get_one(doc.cssselect('#content')) + + def test_installation_forbidden_without_edit_perm(self): + self.client_log_in(as_admin=False) + result = factory.make_NodeResult_for_installation() + response = self.client.get( + reverse('nodeinstallresult-view', args=[result.id])) + self.assertEqual(httplib.FORBIDDEN, response.status_code) + + def test_installation_allowed_with_edit_perm(self): + password = 'test' + user = factory.make_User(password=password) + node = factory.make_Node(owner=user) + self.client.login(username=user.username, password=password) + self.logged_in_user = user + result = factory.make_NodeResult_for_installation(node=node) + response = self.client.get( + reverse('nodeinstallresult-view', args=[result.id])) + self.assertEqual(httplib.OK, response.status_code) + + def test_installation_escapes_html_in_output(self): + self.client_log_in(as_admin=True) + # It's actually surprisingly hard to test for this, because lxml + # un-escapes on parsing, and is very tolerant of malformed input. + # Parsing an un-escaped AC, however, would produce text "AC" + # (because the looks like a tag). 
+ result = factory.make_NodeResult_for_installation(data=b'AC') + doc = self.request_page(result) + self.assertEqual('AC', extract_field(doc, 'output', 'pre')) + + def test_installation_escapes_binary_in_output(self): + self.client_log_in(as_admin=True) + result = factory.make_NodeResult_for_installation(data=b'A\xffB') + doc = self.request_page(result) + self.assertEqual('A\ufffdB', extract_field(doc, 'output', 'pre')) + + def test_installation_hides_output_if_empty(self): + self.client_log_in(as_admin=True) + result = factory.make_NodeResult_for_installation(data=b'') + doc = self.request_page(result) + field = get_one(doc.cssselect('#output')) + self.assertEqual('', field.text_content().strip()) + + +class TestNodeCommissionResultView(MAASServerTestCase): + + def request_page(self, result): + """Request and parse the page for the given `NodeResult`. + + :return: The page's main content as an `lxml.html.HtmlElement`. + """ + link = reverse('nodecommissionresult-view', args=[result.id]) + response = self.client.get(link) + self.assertEqual(httplib.OK, response.status_code, response.content) + doc = html.fromstring(response.content) + return get_one(doc.cssselect('#content')) + + def test_commissioning_forbidden_without_edit_perm(self): + self.client_log_in(as_admin=False) + result = factory.make_NodeResult_for_commissioning() + response = self.client.get( + reverse('nodecommissionresult-view', args=[result.id])) + self.assertEqual(httplib.FORBIDDEN, response.status_code) + + def test_commissioning_allowed_with_edit_perm(self): + password = 'test' + user = factory.make_User(password=password) + node = factory.make_Node(owner=user) + self.client.login(username=user.username, password=password) + self.logged_in_user = user + result = factory.make_NodeResult_for_commissioning(node=node) + response = self.client.get( + reverse('nodecommissionresult-view', args=[result.id])) + self.assertEqual(httplib.OK, response.status_code) + + def 
test_commissioning_displays_result(self): + self.client_log_in(as_admin=True) + result = factory.make_NodeResult_for_commissioning( + data=factory.make_string().encode('ascii')) + doc = self.request_page(result) + + self.assertEqual(result.name, extract_field(doc, 'name')) + self.assertEqual( + result.node.hostname, + extract_field(doc, 'node')) + self.assertEqual( + "%d" % result.script_result, + extract_field(doc, 'script-result')) + self.assertEqual(result.data, extract_field(doc, 'output', 'pre')) + + def test_commissioning_escapes_html_in_output(self): + self.client_log_in(as_admin=True) + # It's actually surprisingly hard to test for this, because lxml + # un-escapes on parsing, and is very tolerant of malformed input. + # Parsing an un-escaped AC, however, would produce text "AC" + # (because the looks like a tag). + result = factory.make_NodeResult_for_commissioning(data=b'AC') + doc = self.request_page(result) + self.assertEqual('AC', extract_field(doc, 'output', 'pre')) + + def test_commissioning_escapes_binary_in_output(self): + self.client_log_in(as_admin=True) + result = factory.make_NodeResult_for_commissioning(data=b'A\xffB') + doc = self.request_page(result) + self.assertEqual('A\ufffdB', extract_field(doc, 'output', 'pre')) + + def test_commissioning_hides_output_if_empty(self): + self.client_log_in(as_admin=True) + result = factory.make_NodeResult_for_commissioning(data=b'') + doc = self.request_page(result) + field = get_one(doc.cssselect('#output')) + self.assertEqual('', field.text_content().strip()) + + +class TestNodeCommissionResultListView(MAASServerTestCase): + + def make_query_string(self, nodes): + """Compose a URL query string to filter for the given nodes.""" + return '&'.join('node=%s' % node.system_id for node in nodes) + + def request_page(self, nodes=None): + """Request and parse the page for the given `NodeResult`. + + :param node: Optional list of `Node` for which results should be + displayed. 
If not given, all results are displayed. + :return: The page's main content as an `lxml.html.HtmlElement`. + """ + link = reverse('nodecommissionresult-list') + if nodes is not None: + link += '?' + self.make_query_string(nodes) + response = self.client.get(link) + self.assertEqual(httplib.OK, response.status_code, response.content) + return get_one(html.fromstring(response.content).cssselect('#content')) + + def make_view(self, nodes=None): + if nodes is None: + query_string = '' + else: + query_string = self.make_query_string(nodes) + view = NodeCommissionResultListView() + view.request = Mock() + view.request.GET = QueryDict(query_string) + return view + + def test_requires_admin(self): + self.client_log_in(as_admin=False) + response = self.client.get(reverse('nodecommissionresult-list')) + self.assertEqual(reverse('login'), extract_redirect(response)) + + def test_lists_empty_page(self): + self.client_log_in(as_admin=True) + content = self.request_page() + self.assertIn( + "No matching commissioning results.", + content.text_content().strip()) + self.assertEqual([], content.cssselect('.result')) + + def test_lists_results(self): + self.client_log_in(as_admin=True) + result = factory.make_NodeResult_for_commissioning(script_result=0) + content = self.request_page() + result_row = get_one(content.cssselect('.result')) + fields = result_row.cssselect('td') + + [script_result, output_file, time, node] = fields + self.assertEqual('OK', script_result.text_content().strip()) + self.assertIn('%d' % result.created.year, time.text_content()) + self.assertEqual(result.node.hostname, node.text_content().strip()) + self.assertEqual( + reverse('node-view', args=[result.node.system_id]), + get_one(node.cssselect('a')).get('href')) + self.assertEqual(result.name, output_file.text_content().strip()) + + def test_shows_failure(self): + self.client_log_in(as_admin=True) + factory.make_NodeResult_for_commissioning( + script_result=randint(1, 100)) + content = self.request_page() + 
result_row = get_one(content.cssselect('.result')) + fields = result_row.cssselect('td') + [script_result, _, _, _] = fields + self.assertEqual("FAILED", script_result.text_content().strip()) + self.assertNotEqual([], script_result.find_class('warning')) + + def test_links_to_result(self): + self.client_log_in(as_admin=True) + result = factory.make_NodeResult_for_commissioning( + script_result=randint(1, 100)) + content = self.request_page() + result_row = get_one(content.cssselect('.result')) + fields = result_row.cssselect('td') + [script_result, _, _, _] = fields + link = get_one(script_result.cssselect('a')) + self.assertEqual( + reverse('nodecommissionresult-view', args=[result.id]), + link.get('href')) + + def test_groups_by_node(self): + nodes = [factory.make_Node() for _ in range(3)] + # Create two results per node, but interleave them so the results for + # any given node are unlikely to occur side by side by accident. + for _ in range(2): + for node in nodes: + factory.make_NodeResult_for_commissioning(node=node) + sorted_results = self.make_view().get_queryset() + self.assertEqual(sorted_results[0].node, sorted_results[1].node) + self.assertEqual(sorted_results[2].node, sorted_results[3].node) + self.assertEqual(sorted_results[4].node, sorted_results[5].node) + + def test_sorts_by_creation_time_for_same_node(self): + node = factory.make_Node() + results = [ + factory.make_NodeResult_for_commissioning(node=node) + for _ in range(3) + ] + for result in results: + result.created -= factory.make_timedelta() + result.save() + + self.assertEqual( + sorted(results, key=attrgetter('created'), reverse=True), + list(self.make_view().get_queryset())) + + def test_sorts_by_name_for_same_node_and_creation_time(self): + node = factory.make_Node() + results = { + factory.make_NodeResult_for_commissioning( + node=node, name=factory.make_name().lower()) + for _ in range(5) + } + self.assertEqual( + sorted(results, key=attrgetter('name')), + 
list(self.make_view().get_queryset())) + + def test_filters_by_node(self): + factory.make_NodeResult_for_commissioning() + node = factory.make_Node() + node_results = { + factory.make_NodeResult_for_commissioning(node=node) + for _ in range(3) + } + factory.make_NodeResult_for_commissioning() + + self.assertEqual( + node_results, + set(self.make_view(nodes=[node]).get_queryset())) + + def test_combines_node_filters(self): + # The nodes are passed as GET parameters, which means there is some + # subtlety to how they are passed to the application. Reading them + # naively would ignore all but the first node passed, so make sure we + # process all of them. + self.client_log_in(as_admin=True) + results = [ + factory.make_NodeResult_for_commissioning() + for _ in range(3) + ] + matching_results = results[1:3] + content = self.request_page( + nodes=[result.node for result in matching_results]) + rows = content.cssselect('.result') + self.assertThat(rows, HasLength(len(matching_results))) + matching_names = set() + for row in rows: + [_, name, _, _] = row.cssselect('td') + matching_names.add(name.text_content().strip()) + self.assertEqual( + {result.name for result in matching_results}, + matching_names) + + def test_does_not_show_node_if_not_filtering_by_node(self): + self.client_log_in(as_admin=True) + doc = self.request_page() + header = get_one(doc.cssselect('#results_header')) + self.assertEqual( + "Commissioning results", + normalise_whitespace(header.text_content())) + + def test_shows_node_if_filtering_by_node(self): + self.client_log_in(as_admin=True) + node = factory.make_Node() + doc = self.request_page(nodes=[node]) + header = get_one(doc.cssselect('#results_header')) + self.assertEqual( + "Commissioning results for %s" % node.hostname, + normalise_whitespace(header.text_content())) + + def test_shows_nodes_if_filtering_by_multiple_nodes(self): + self.client_log_in(as_admin=True) + names = [factory.make_name('node').lower() for _ in range(2)] + nodes = 
[factory.make_Node(hostname=name) for name in names] + doc = self.request_page(nodes=nodes) + header = get_one(doc.cssselect('#results_header')) + self.assertEqual( + "Commissioning results for %s" % ', '.join(sorted(names)), + normalise_whitespace(header.text_content())) + + def test_does_not_list_installation_results(self): + self.client_log_in(as_admin=True) + factory.make_NodeResult_for_installation() + content = self.request_page() + self.assertIsNotNone( + get_one(content.cssselect('#no_results'))) + self.assertEqual([], content.cssselect('.result')) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_nodes.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_nodes.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_nodes.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_nodes.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,12 +14,22 @@ __metaclass__ = type __all__ = [] -import httplib from cgi import escape +import httplib +import json +import logging from operator import attrgetter import os -from random import randint +from random import ( + choice, + randint, + ) from textwrap import dedent +import time +from unittest import ( + skip, + SkipTest, + ) from urlparse import ( parse_qsl, urlparse, @@ -27,22 +37,31 @@ from django.conf import settings from django.core.urlresolvers import reverse +from django.db import transaction +from django.utils import html from lxml.etree import XPath from lxml.html import fromstring +from maasserver import preseed as preseed_module import maasserver.api -from maasserver.third_party_drivers import get_third_party_driver +from maasserver.clusterrpc.testing.boot_images import make_rpc_boot_image from maasserver.enum import ( + NODE_BOOT, NODE_STATUS, + NODE_STATUS_CHOICES_DICT, NODEGROUP_STATUS, NODEGROUPINTERFACE_MANAGEMENT, + POWER_STATE, ) from maasserver.forms import NodeActionForm from maasserver.models import ( Config, + Event, MACAddress, Node, + node as 
node_module, ) from maasserver.node_action import ( + AcquireNode, Commission, Delete, StartNode, @@ -51,28 +70,131 @@ get_enlist_preseed, get_preseed, ) +from maasserver.rpc.testing.mixins import PreseedRPCMixin from maasserver.testing import ( extract_redirect, get_content_links, - reload_object, - reload_objects, ) from maasserver.testing.architecture import make_usable_architecture from maasserver.testing.factory import factory +from maasserver.testing.orm import ( + reload_object, + reload_objects, + ) from maasserver.testing.testcase import ( MAASServerTestCase, SeleniumTestCase, ) -from maasserver.utils import map_enum +from maasserver.third_party_drivers import get_third_party_driver from maasserver.utils.orm import get_one from maasserver.views import nodes as nodes_views -from maasserver.views.nodes import message_from_form_stats +from maasserver.views.nodes import ( + event_to_dict, + message_from_form_stats, + node_to_dict, + NodeEventListView, + NodeListView, + NodeView, + ) from maastesting.djangotestcase import count_queries +from metadataserver.enum import RESULT_TYPE from metadataserver.models.commissioningscript import ( - LLDP_OUTPUT_NAME, LIST_MODALIASES_OUTPUT_NAME, + LLDP_OUTPUT_NAME, ) -from testtools.matchers import ContainsAll +from provisioningserver.utils.enum import map_enum +from provisioningserver.utils.text import normalise_whitespace +from testtools.matchers import ( + Contains, + ContainsAll, + Equals, + HasLength, + Not, + ) + + +def normalize_text(text): + return ' '.join(text.split()) + + +class TestHelpers(MAASServerTestCase): + + def test_node_to_dict_keys(self): + node = factory.make_Node(mac=True) + self.assertThat( + node_to_dict(node), + ContainsAll([ + 'id', 'system_id', 'url', 'hostname', 'fqdn', + 'status', 'owner', 'cpu_count', 'memory', 'storage', + 'power_state', 'zone', 'zone_url', 'mac', 'vendor', 'macs'])) + + def test_node_to_dict_values(self): + node = factory.make_Node(mac=True) + dict_node = node_to_dict(node) 
+ self.expectThat(dict_node['id'], Equals(node.id)) + self.expectThat(dict_node['system_id'], Equals(node.system_id)) + self.expectThat( + dict_node['url'], + Equals(reverse('node-view', args=[node.system_id]))) + self.expectThat(dict_node['hostname'], Equals(node.hostname)) + self.expectThat(dict_node['fqdn'], Equals(node.fqdn)) + self.expectThat(dict_node['status'], Equals(node.display_status())) + self.expectThat(dict_node['owner'], Equals('')) + self.expectThat(dict_node['cpu_count'], Equals(node.cpu_count)) + self.expectThat(dict_node['memory'], Equals(node.display_memory())) + self.expectThat(dict_node['storage'], Equals(node.display_storage())) + self.expectThat(dict_node['power_state'], Equals(node.power_state)) + self.expectThat(dict_node['zone'], Equals(node.zone.name)) + self.expectThat( + dict_node['zone_url'], + Equals(reverse('zone-view', args=[node.zone.name]))) + self.expectThat( + dict_node['mac'], + Equals(node.get_pxe_mac().mac_address.get_raw())) + self.expectThat(dict_node['vendor'], Equals(node.get_pxe_mac_vendor())) + self.assertItemsEqual( + dict_node['macs'], + [mac.mac_address.get_raw() for mac in node.get_extra_macs()]) + + def test_node_to_dict_include_events(self): + node = factory.make_Node() + etype = factory.make_EventType(level=logging.INFO) + events = [factory.make_Event(node, etype) for _ in range(4)] + dict_node = node_to_dict(node, event_log_count=2) + self.expectThat(dict_node['events']['total'], Equals(len(events))) + self.expectThat(dict_node['events']['count'], Equals(2)) + self.expectThat(len(dict_node['events']['events']), Equals(2)) + self.expectThat( + dict_node['events']['more_url'], + Equals(reverse('node-event-list-view', args=[node.system_id]))) + + def test_node_to_dict_excludes_debug_events(self): + node = factory.make_Node() + debug_type = factory.make_EventType(level=logging.DEBUG) + debug_event = factory.make_Event(node, debug_type) + + node_dict = node_to_dict(node, event_log_count=2) + self.assertThat( + 
[event_dict['id'] for event_dict in node_dict['events']['events']], + Not(Contains(debug_event.id))) + + def test_event_to_dict_keys(self): + event = factory.make_Event() + self.assertThat( + event_to_dict(event), + ContainsAll([ + 'id', 'level', 'created', 'type', 'description'])) + + def test_event_to_dict_values(self): + event = factory.make_Event() + dict_event = event_to_dict(event) + self.expectThat(dict_event['id'], Equals(event.id)) + self.expectThat(dict_event['level'], Equals(event.type.level_str)) + self.expectThat( + dict_event['created'], + Equals(event.created.strftime('%a, %d %b. %Y %H:%M:%S'))) + self.expectThat(dict_event['type'], Equals(event.type.description)) + self.expectThat(dict_event['description'], Equals(event.description)) class TestGenerateJSPowerTypes(MAASServerTestCase): @@ -130,9 +252,23 @@ consumer, token = profile.create_authorisation_token() self.patch(maasserver.api, 'get_oauth_token', lambda request: token) + def get_node_list_ajax(self, ids=None): + """Get result of AJAX request for node list.""" + url = reverse('node-list') + if ids is not None and len(ids) > 0: + url += '?id=' + '&id='.join(ids) + return self.client.get( + url, HTTP_X_REQUESTED_WITH='XMLHttpRequest') + + def get_node_view_ajax(self, node): + """Get result of AJAX request for node view.""" + url = reverse('node-view', args=[node.system_id]) + return self.client.get( + url, HTTP_X_REQUESTED_WITH='XMLHttpRequest') + def test_node_list_contains_link_to_node_view(self): self.client_log_in() - node = factory.make_node() + node = factory.make_Node() response = self.client.get(reverse('node-list')) node_link = reverse('node-view', args=[node.system_id]) self.assertIn(node_link, get_content_links(response)) @@ -146,18 +282,24 @@ def test_node_list_contains_column_sort_links(self): # Just create a node to have something in the list self.client_log_in() - factory.make_node() + factory.make_Node() response = self.client.get(reverse('node-list')) - sort_hostname = 
'?sort=hostname&dir=asc' - sort_status = '?sort=status&dir=asc' - sort_zone = '?sort=zone&dir=asc' - self.assertIn(sort_hostname, get_content_links(response)) - self.assertIn(sort_status, get_content_links(response)) - self.assertIn(sort_zone, get_content_links(response)) + sort_fields = ( + NodeListView.sort_fields + + tuple(NodeListView.late_sort_fields.keys()) + ) + expected_sort_fields = [ + 'hostname', 'status', 'owner', 'cpu_count', 'memory', 'storage', + 'primary_mac', 'zone' + ] + self.assertItemsEqual(expected_sort_fields, sort_fields) + sorting_links = ['?sort=%s&dir=asc' % field for field in sort_fields] + self.assertThat( + get_content_links(response), ContainsAll(sorting_links)) def test_node_list_ignores_unknown_sort_param(self): self.client_log_in() - factory.make_node() + factory.make_Node() response = self.client.get( reverse('node-list'), {'sort': 'unknown', 'dir': 'asc'}) # No error: the unknown sorting parameter was ignored. @@ -166,18 +308,18 @@ def test_node_list_lists_nodes_from_different_nodegroups(self): # Bug 1084443. 
self.client_log_in() - nodegroup1 = factory.make_node_group() - nodegroup2 = factory.make_node_group() - factory.make_node(nodegroup=nodegroup1) - factory.make_node(nodegroup=nodegroup2) - factory.make_node(nodegroup=nodegroup2) + nodegroup1 = factory.make_NodeGroup() + nodegroup2 = factory.make_NodeGroup() + factory.make_Node(nodegroup=nodegroup1) + factory.make_Node(nodegroup=nodegroup2) + factory.make_Node(nodegroup=nodegroup2) response = self.client.get(reverse('node-list')) self.assertEqual(httplib.OK, response.status_code) def test_node_list_sorts_by_hostname(self): self.client_log_in() names = ['zero', 'one', 'five'] - nodes = [factory.make_node(hostname=n) for n in names] + nodes = [factory.make_Node(hostname=n) for n in names] # First check the ascending sort order sorted_nodes = sorted(nodes, key=attrgetter('hostname')) @@ -209,10 +351,10 @@ self.client_log_in() statuses = { NODE_STATUS.READY, - NODE_STATUS.DECLARED, - NODE_STATUS.FAILED_TESTS, + NODE_STATUS.NEW, + NODE_STATUS.FAILED_COMMISSIONING, } - nodes = [factory.make_node(status=s) for s in statuses] + nodes = [factory.make_Node(status=s) for s in statuses] # First check the ascending sort order sorted_nodes = sorted(nodes, key=attrgetter('status')) @@ -240,10 +382,42 @@ [link for link in get_content_links(response) if link.startswith('/nodes/node')]) + def test_node_list_sorts_by_primary_mac(self): + self.client_log_in() + [factory.make_Node(mac=True) for _ in range(8)] + + # First check the ascending sort order. + node_macs = sorted( + (unicode(n.get_primary_mac()), n) for n in Node.objects.all()) + sorted_nodes = [item[1] for item in node_macs] + response = self.client.get( + reverse('node-list'), { + 'sort': 'primary_mac', + 'dir': 'asc'}) + node_links = [ + reverse('node-view', args=[node.system_id]) + for node in sorted_nodes + ] + self.assertEqual( + node_links, + [link for link in get_content_links(response) + if link.startswith('/nodes/node')]) + + # Now check the reverse order. 
+ node_links = list(reversed(node_links)) + response = self.client.get( + reverse('node-list'), { + 'sort': 'primary_mac', + 'dir': 'desc'}) + self.assertEqual( + node_links, + [link for link in get_content_links(response) + if link.startswith('/nodes/node')]) + def test_node_list_sorts_by_zone(self): self.client_log_in() - zones = [factory.make_zone(sortable_name=True) for _ in range(5)] - nodes = [factory.make_node(zone=zone) for zone in zones] + zones = [factory.make_Zone(sortable_name=True) for _ in range(5)] + nodes = [factory.make_Node(zone=zone) for zone in zones] # We use PostgreSQL's case-insensitive text sorting algorithm. sorted_nodes = sorted( @@ -281,9 +455,9 @@ self.patch(nodes_views.NodeListView, 'paginate_by', page_size) nodes = [] - tag = factory.make_tag('shiny') + tag = factory.make_Tag('shiny') for name in ('bbb', 'ccc', 'ddd', 'aaa'): - node = factory.make_node(hostname=name) + node = factory.make_Node(hostname=name) node.tags = [tag] nodes.append(node) @@ -309,31 +483,31 @@ def test_node_list_displays_fqdn_dns_not_managed(self): self.client_log_in() - nodes = [factory.make_node() for i in range(3)] + nodes = [factory.make_Node() for _ in range(3)] response = self.client.get(reverse('node-list')) node_fqdns = [node.fqdn for node in nodes] self.assertThat(response.content, ContainsAll(node_fqdns)) def test_node_list_displays_fqdn_dns_managed(self): self.client_log_in() - nodegroup = factory.make_node_group( + nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ACCEPTED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) - nodes = [factory.make_node(nodegroup=nodegroup) for i in range(3)] + nodes = [factory.make_Node(nodegroup=nodegroup) for _ in range(3)] response = self.client.get(reverse('node-list')) node_fqdns = [node.fqdn for node in nodes] self.assertThat(response.content, ContainsAll(node_fqdns)) def test_node_list_displays_zone(self): self.client_log_in() - node = factory.make_node() + node = factory.make_Node() response = 
self.client.get(reverse('node-list')) [zone_field] = fromstring(response.content).cssselect('.zone-column') self.assertEqual(node.zone.name, zone_field.text_content().strip()) def test_node_list_links_to_zone(self): self.client_log_in() - node = factory.make_node() + node = factory.make_Node() response = self.client.get(reverse('node-list')) zone_link = reverse('zone-view', args=[node.zone.name]) self.assertEqual( @@ -343,12 +517,12 @@ def test_node_list_displays_sorted_list_of_nodes(self): # Nodes are sorted on the node list page, newest first. self.client_log_in() - nodes = [factory.make_node() for i in range(3)] + nodes = [factory.make_Node() for _ in range(3)] # Explicitely set node.created since all of these node will # be created in the same transaction and thus have the same # 'created' value by default. for node in nodes: - created = factory.getRandomDate() + created = factory.make_date() # Update node.created without calling node.save(). Node.objects.filter(id=node.id).update(created=created) nodes = reload_objects(Node, nodes) @@ -363,6 +537,9 @@ if link.startswith('/nodes/node')]) def test_node_list_num_queries_is_independent_of_num_nodes(self): + # XXX: GavinPanella 2014-10-03 bug=1377335 + self.skip("Unreliable; something is causing varying counts.") + # Listing nodes takes a constant number of database queries, # regardless of how many nodes are in the listing. 
self.client_log_in() @@ -378,9 +555,9 @@ def make_nodes(nodegroup, number): """Create `number` new nodes.""" for counter in range(number): - factory.make_node(nodegroup=nodegroup, mac=True) + factory.make_Node(nodegroup=nodegroup, mac=True) - nodegroup = factory.make_node_group() + nodegroup = factory.make_NodeGroup() make_nodes(nodegroup, 10) url = reverse('node-list') @@ -397,10 +574,43 @@ self.assertEqual(20, count_node_links(response)) self.assertEqual(num_queries, num_bonus_queries) + def test_node_list_ajax_returns_json(self): + self.client_log_in() + response = self.get_node_list_ajax() + self.assertEqual('application/json', response['Content-Type']) + + def test_node_list_ajax_returns_empty_list_when_no_ids(self): + self.client_log_in() + response = self.get_node_list_ajax() + json_obj = json.loads(response.content) + self.assertEqual([], json_obj) + + def test_node_list_ajax_returns_node_info(self): + self.client_log_in() + nodes = [factory.make_Node() for _ in range(3)] + ids = [node.system_id for node in nodes] + response = self.get_node_list_ajax(ids=ids) + json_obj = json.loads(response.content) + self.assertItemsEqual( + [node_to_dict(node) for node in nodes], + json_obj) + + def test_node_list_displays_pxe_mac_if_available(self): + node = factory.make_Node() + [factory.make_MACAddress(node=node) for _ in range(3)] + node.pxe_mac = node.macaddress_set.last() + node.save() + + self.client_log_in() + response = self.client.get(reverse('node-list')) + document = fromstring(response.content) + [mac_element] = document.xpath('//span[@data-field="mac"]') + self.expectThat(mac_element.text, Equals(node.pxe_mac.mac_address)) + def test_view_node_displays_node_info(self): # The node page features the basic information about the node. 
self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) + node = factory.make_Node(owner=self.logged_in_user) node.cpu_count = 123 node.memory = 512 node.save() @@ -411,15 +621,15 @@ self.assertIn(node.hostname, content_text) self.assertIn(node.display_status(), content_text) self.assertIn(node.architecture, content_text) - self.assertIn('%d MB' % (node.memory,), content_text) + self.assertIn('%s GiB' % (node.display_memory(),), content_text) self.assertIn('%d' % (node.cpu_count,), content_text) self.assertIn(self.logged_in_user.username, content_text) def test_view_node_contains_tag_names(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) - tag_a = factory.make_tag() - tag_b = factory.make_tag() + node = factory.make_Node(owner=self.logged_in_user) + tag_a = factory.make_Tag() + tag_b = factory.make_Tag() node.tags.add(tag_a) node.tags.add(tag_b) node_link = reverse('node-view', args=[node.system_id]) @@ -434,38 +644,67 @@ def test_view_node_contains_ip_addresses(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) + node = factory.make_Node(owner=self.logged_in_user, disable_ipv4=False) nodegroup = node.nodegroup macs = [ - factory.make_mac_address(node=node).mac_address for i in range(2)] - ips = [factory.getRandomIPAddress() for i in range(2)] - for i in range(2): - factory.make_dhcp_lease( - nodegroup=nodegroup, mac=macs[i], ip=ips[i]) + factory.make_MACAddress(node=node).mac_address + for _ in range(2) + ] + ips = [factory.make_ipv4_address() for _ in range(2)] + for mac, ip in zip(macs, ips): + factory.make_DHCPLease(nodegroup=nodegroup, mac=mac, ip=ip) node_link = reverse('node-view', args=[node.system_id]) response = self.client.get(node_link) self.assertThat(response.content, ContainsAll(ips)) def test_view_node_does_not_contain_ip_addresses_if_no_lease(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) + node = 
factory.make_Node(owner=self.logged_in_user, disable_ipv4=False) node_link = reverse('node-view', args=[node.system_id]) response = self.client.get(node_link) self.assertNotIn("IP addresses", response.content) + def test_view_node_warns_about_unconfigured_IPv6_addresses(self): + self.client_log_in() + ipv6_network = factory.make_ipv6_network() + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + owner=self.logged_in_user, network=ipv6_network, + osystem='windows') + factory.make_StaticIPAddress( + ip=factory.pick_ip_in_network(ipv6_network), + mac=node.get_primary_mac()) + node_link = reverse('node-view', args=[node.system_id]) + page = fromstring(self.client.get(node_link).content) + [addresses_section] = page.cssselect('#ip-addresses') + self.expectThat( + addresses_section.cssselect('#unconfigured-ips-warning'), + Not(HasLength(0))) + + def test_view_node_does_not_warn_if_no_unconfigured_IPv6_addresses(self): + self.client_log_in() + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + owner=self.logged_in_user) + factory.make_StaticIPAddress(mac=node.get_primary_mac()) + node_link = reverse('node-view', args=[node.system_id]) + page = fromstring(self.client.get(node_link).content) + [addresses_section] = page.cssselect('#ip-addresses') + self.assertEqual( + [], + addresses_section.cssselect('#unconfigured-ip-warning')) + def test_view_node_displays_node_info_no_owner(self): - # If the node has no owner, the Owner 'slot' does not exist. + # If the node has no owner, the Owner 'slot' is hidden. 
self.client_log_in() - node = factory.make_node() + node = factory.make_Node() node_link = reverse('node-view', args=[node.system_id]) response = self.client.get(node_link) doc = fromstring(response.content) - content_text = doc.cssselect('#content')[0].text_content() - self.assertNotIn('Owner', content_text) + [owner_div] = doc.cssselect('#owner') + self.assertIn('hidden', owner_div.attrib['class']) def test_view_node_displays_link_to_view_preseed(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) + node = factory.make_Node(owner=self.logged_in_user) node_link = reverse('node-view', args=[node.system_id]) response = self.client.get(node_link) node_preseed_link = reverse('node-preseed-view', args=[node.system_id]) @@ -473,7 +712,7 @@ def test_view_node_displays_no_routers_if_no_routers_discovered(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user, routers=[]) + node = factory.make_Node(owner=self.logged_in_user, routers=[]) node_link = reverse('node-view', args=[node.system_id]) response = self.client.get(node_link) @@ -486,7 +725,7 @@ def test_view_node_displays_routers_if_any(self): self.client_log_in() router = factory.make_MAC() - node = factory.make_node(owner=self.logged_in_user, routers=[router]) + node = factory.make_Node(owner=self.logged_in_user, routers=[router]) node_link = reverse('node-view', args=[node.system_id]) response = self.client.get(node_link) @@ -499,7 +738,7 @@ def test_view_node_separates_routers_by_comma(self): self.client_log_in() routers = [factory.make_MAC(), factory.make_MAC()] - node = factory.make_node(owner=self.logged_in_user, routers=routers) + node = factory.make_Node(owner=self.logged_in_user, routers=routers) node_link = reverse('node-view', args=[node.system_id]) response = self.client.get(node_link) @@ -513,7 +752,7 @@ def test_view_node_links_to_physical_zone(self): self.client_log_in() - node = factory.make_node() + node = factory.make_Node() node_link = 
reverse('node-view', args=[node.system_id]) response = self.client.get(node_link) @@ -529,7 +768,7 @@ def test_view_node_shows_macs(self): self.client_log_in() - mac = factory.make_mac_address() + mac = factory.make_MACAddress_with_Node() response = self.client.get( reverse('node-view', args=[mac.node.system_id])) @@ -540,11 +779,18 @@ [listing] = get_one(interfaces_section.cssselect('span')) self.assertEqual(mac.mac_address, listing.text_content().strip()) - def test_view_node_lists_macs_as_list_items(self): + def test_view_node_lists_macs_as_sorted_list_items(self): + # The PXE mac is listed first on the node view page. self.client_log_in() - node = factory.make_node() - factory.make_mac_address('11:11:11:11:11:11', node=node) - factory.make_mac_address('22:22:22:22:22:22', node=node) + node = factory.make_Node() + + macs = [ + factory.make_MACAddress(node=node) + for _ in range(4) + ] + pxe_mac_index = 2 + node.pxe_mac = macs[pxe_mac_index] + node.save() response = self.client.get(reverse('node-view', args=[node.system_id])) self.assertEqual(httplib.OK, response.status_code) @@ -553,14 +799,18 @@ '#network-interfaces') [interfaces_list] = interfaces_section.cssselect('ul') interfaces = interfaces_list.cssselect('li') + sorted_macs = ( + [macs[pxe_mac_index]] + + macs[:pxe_mac_index] + macs[pxe_mac_index + 1:] + ) self.assertEqual( - ['11:11:11:11:11:11', '22:22:22:22:22:22'], + [mac.mac_address.get_raw() for mac in sorted_macs], [interface.text_content().strip() for interface in interfaces]) def test_view_node_links_network_interfaces_to_networks(self): self.client_log_in() - network = factory.make_network() - mac = factory.make_mac_address(networks=[network]) + network = factory.make_Network() + mac = factory.make_MACAddress_with_Node(networks=[network]) response = self.client.get( reverse('node-view', args=[mac.node.system_id])) @@ -572,7 +822,7 @@ [interface] = interfaces_list.cssselect('li') self.assertEqual( "%s (on %s)" % (mac.mac_address, network.name), - 
' '.join(interface.text_content().split())) + normalise_whitespace(interface.text_content())) [link] = interface.cssselect('a') self.assertEqual(network.name, link.text_content().strip()) self.assertEqual( @@ -581,8 +831,8 @@ def test_view_node_sorts_networks_by_name(self): self.client_log_in() - networks = factory.make_networks(3, sortable_name=True) - mac = factory.make_mac_address(networks=networks) + networks = factory.make_Networks(3, sortable_name=True) + mac = factory.make_MACAddress_with_Node(networks=networks) response = self.client.get( reverse('node-view', args=[mac.node.system_id])) @@ -595,11 +845,11 @@ [interface] = interfaces_list.cssselect('li') self.assertEqual( "%s (on %s)" % (mac.mac_address, ', '.join(sorted_names)), - ' '.join(interface.text_content().split())) + normalise_whitespace(interface.text_content())) def test_view_node_displays_link_to_edit_if_user_owns_node(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) + node = factory.make_Node(owner=self.logged_in_user) node_link = reverse('node-view', args=[node.system_id]) response = self.client.get(node_link) node_edit_link = reverse('node-edit', args=[node.system_id]) @@ -608,7 +858,7 @@ def test_view_node_does_not_show_link_to_delete_node(self): # Only admin users can delete nodes. 
self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) + node = factory.make_Node(owner=self.logged_in_user) node_link = reverse('node-view', args=[node.system_id]) response = self.client.get(node_link) node_delete_link = reverse('node-delete', args=[node.system_id]) @@ -616,7 +866,7 @@ def test_user_cannot_delete_node(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) + node = factory.make_Node(owner=self.logged_in_user) node_delete_link = reverse('node-delete', args=[node.system_id]) response = self.client.get(node_delete_link) self.assertEqual(httplib.FORBIDDEN, response.status_code) @@ -627,7 +877,7 @@ NODE_STATUS.READY, NODE_STATUS.COMMISSIONING) help_link = "https://maas.ubuntu.com/docs/nodes.html" for status in map_enum(NODE_STATUS).values(): - node = factory.make_node(status=status) + node = factory.make_Node(status=status) node_link = reverse('node-view', args=[node.system_id]) response = self.client.get(node_link) links = get_content_links(response, '#flash-messages') @@ -636,78 +886,144 @@ else: self.assertNotIn(help_link, links) + def test_view_node_ajax_returns_json(self): + self.client_log_in() + node = factory.make_Node() + response = self.get_node_view_ajax(node) + self.assertEqual('application/json', response['Content-Type']) + + def test_view_node_ajax_returns_node_info(self): + self.client_log_in() + node = factory.make_Node() + response = self.get_node_view_ajax(node) + json_obj = json.loads(response.content) + del json_obj['action_view'] + self.assertEquals( + node_to_dict( + node, + event_log_count=NodeView.number_of_events_shown), + json_obj) + + def test_view_node_ajax_returns_action_view(self): + self.client_log_in() + node = factory.make_Node() + response = self.get_node_view_ajax(node) + json_obj = json.loads(response.content) + self.assertThat( + json_obj['action_view'], + Contains('

    Node details pre')) + doc.cssselect('#xml > pre')) + self.assertDocTestMatches(expected_content, observed_content) + + def test_view_node_shows_probed_details_yaml_output_if_set(self): + self.client_log_in() + node = factory.make_Node(owner=self.logged_in_user) + lldp_data = "bar".encode("utf-8") + factory.make_NodeResult_for_commissioning( + node=node, name=LLDP_OUTPUT_NAME, script_result=0, data=lldp_data) + node_link = reverse('node-view', args=[node.system_id]) + + response = self.client.get(node_link) + self.assertEqual(httplib.OK, response.status_code) + + doc = fromstring(response.content) + expected_content = dedent("""\ + - list: + - lldp:foo: + bar + """) + # We expect only one matched element, so this join is + # defensive, and gives better output on failure. + observed_content = "\n---\n".join( + element.text for element in + doc.cssselect('#yaml > pre')) self.assertDocTestMatches(expected_content, observed_content) def test_view_node_POST_commission(self): self.client_log_in(as_admin=True) - node = factory.make_node(status=NODE_STATUS.READY) + node = factory.make_Node(status=NODE_STATUS.READY) node_link = reverse('node-view', args=[node.system_id]) response = self.client.post( node_link, data={NodeActionForm.input_name: Commission.name}) @@ -899,56 +1265,15 @@ def test_view_node_POST_action_displays_message(self): self.client_log_in() - factory.make_sshkey(self.logged_in_user) + factory.make_SSHKey(self.logged_in_user) self.set_up_oauth_token() - node = factory.make_node(status=NODE_STATUS.READY) + node = factory.make_Node(status=NODE_STATUS.READY) response = self.perform_action_and_get_node_page( - node, StartNode.name) + node, AcquireNode.name) self.assertIn( "This node is now allocated to you.", '\n'.join(msg.message for msg in response.context['messages'])) - def test_node_list_query_includes_current(self): - self.client_log_in() - qs = factory.getRandomString() - response = self.client.get(reverse('node-list'), {"query": qs}) - query_value = 
fromstring(response.content).xpath( - "string(//div[@id='nodes']//input[@name='query']/@value)") - self.assertIn(qs, query_value) - - def test_node_list_query_error_on_missing_tag(self): - self.client_log_in() - response = self.client.get( - reverse('node-list'), {"query": "maas-tags=missing"}) - error_string = fromstring(response.content).xpath( - "string(//div[@id='nodes']//p[@class='form-errors'])") - self.assertIn("No such tag(s): 'missing'", error_string) - - def test_node_list_query_error_on_unknown_constraint(self): - self.client_log_in() - response = self.client.get( - reverse('node-list'), {"query": "color=red"}) - error_string = fromstring(response.content).xpath( - "string(//div[@id='nodes']//p[@class='form-errors'])") - self.assertEqual("color: No such constraint.", error_string.strip()) - - def test_node_list_query_selects_subset(self): - self.client_log_in() - tag = factory.make_tag("shiny") - node1 = factory.make_node(cpu_count=1) - node2 = factory.make_node(cpu_count=2) - node3 = factory.make_node(cpu_count=2) - node1.tags = [tag] - node2.tags = [tag] - node3.tags = [] - response = self.client.get( - reverse('node-list'), {"query": "maas-tags=shiny cpu=2"}) - node2_link = reverse('node-view', args=[node2.system_id]) - document = fromstring(response.content) - node_links = document.xpath( - "//div[@id='nodes']/form/table/tr/td[2]/a/@href") - self.assertEqual([node2_link], node_links) - def test_node_list_paginates(self): """Node listing is split across multiple pages with links""" self.client_log_in() @@ -956,7 +1281,7 @@ page_size = 2 self.patch(nodes_views.NodeListView, 'paginate_by', page_size) nodes = [ - factory.make_node(created="2012-10-12 12:00:%02d" % i) + factory.make_Node(created="2012-10-12 12:00:%02d" % i) for i in range(page_size * 2 + 1) ] # Order node links with newest first as the view is expected to @@ -965,7 +1290,7 @@ for node in reversed(nodes) ] expr_node_links = XPath( - "//div[@id='nodes']/form/table/tr/td[2]/a/@href") + 
"//div[@id='nodes']/form/table/tr/td[3]/a/@href") expr_page_anchors = XPath("//div[@class='pagination']//a") # Fetch first page, should link newest two nodes and page 2 response = self.client.get(reverse('node-list')) @@ -995,41 +1320,11 @@ [(a.text.lower(), a.get("href")) for a in expr_page_anchors(page3)]) - def test_node_list_query_paginates(self): - """Node list query subset is split across multiple pages with links""" - self.client_log_in() - # Set a very small page size to save creating lots of nodes - self.patch(nodes_views.NodeListView, 'paginate_by', 2) - nodes = [ - factory.make_node(created="2012-10-12 12:00:%02d" % i) - for i in range(10)] - tag = factory.make_tag("odd") - for node in nodes[::2]: - node.tags = [tag] - last_node_link = reverse('node-view', args=[nodes[0].system_id]) - response = self.client.get( - reverse('node-list'), - {"query": "maas-tags=odd", "page": 3}) - document = fromstring(response.content) - self.assertIn("5 matching nodes", document.xpath("string(//h1)")) - self.assertEqual( - [last_node_link], - document.xpath("//div[@id='nodes']/form/table/tr/td[2]/a/@href")) - self.assertEqual( - [ - ("first", "?query=maas-tags%3Dodd"), - ("previous", "?query=maas-tags%3Dodd&page=2") - ], - [ - (a.text.lower(), a.get("href")) - for a in document.xpath("//div[@class='pagination']//a") - ]) - def test_node_list_performs_bulk_action(self): self.client_log_in(as_admin=True) - node1 = factory.make_node() - node2 = factory.make_node() - node3 = factory.make_node() + node1 = factory.make_Node() + node2 = factory.make_Node() + node3 = factory.make_Node() system_id_to_delete = [node1.system_id, node2.system_id] response = self.client.post( reverse('node-list'), @@ -1053,7 +1348,7 @@ def test_node_list_post_form_preserves_get_params(self): self.client_log_in() - factory.make_node() + factory.make_Node() params = { "dir": "desc", "query": factory.make_name("query"), @@ -1069,7 +1364,7 @@ def 
test_node_list_view_shows_third_party_drivers_warning(self): self.client_log_in() - factory.make_node() + factory.make_Node() Config.objects.set_config( name='enable_third_party_drivers', value=True) response = self.client.get(reverse('node-list')) @@ -1081,7 +1376,7 @@ def test_node_list_view_shows_third_party_drivers_admin_warning(self): self.client_log_in(as_admin=True) - factory.make_node() + factory.make_Node() Config.objects.set_config( name='enable_third_party_drivers', value=True) response = self.client.get(reverse('node-list')) @@ -1094,7 +1389,7 @@ def test_node_list_view_hides_drivers_warning_if_drivers_disabled(self): self.client_log_in() - factory.make_node() + factory.make_Node() Config.objects.set_config( name='enable_third_party_drivers', value=False) response = self.client.get(reverse('node-list')) @@ -1106,7 +1401,7 @@ self.client_log_in() Config.objects.set_config( name='enable_third_party_drivers', value=True) - node = factory.make_node(status=NODE_STATUS.READY) + node = factory.make_Node(status=NODE_STATUS.READY) response = self.client.get(reverse('node-view', args=[node.system_id])) self.assertNotIn("Third Party Drivers", response.content) @@ -1114,9 +1409,9 @@ self.client_log_in() Config.objects.set_config( name='enable_third_party_drivers', value=True) - node = factory.make_node(status=NODE_STATUS.READY) + node = factory.make_Node(status=NODE_STATUS.READY) data = "pci:v00001590d00000047sv00001590sd00000047bc*sc*i*" - factory.make_node_commission_result( + factory.make_NodeResult_for_commissioning( node=node, name=LIST_MODALIASES_OUTPUT_NAME, script_result=0, data=data.encode("utf-8")) response = self.client.get(reverse('node-view', args=[node.system_id])) @@ -1130,15 +1425,491 @@ self.client_log_in() Config.objects.set_config( name='enable_third_party_drivers', value=False) - node = factory.make_node(status=NODE_STATUS.READY) + node = factory.make_Node(status=NODE_STATUS.READY) data = "pci:v00001590d00000047sv00001590sd00000047bc*sc*i*" - 
factory.make_node_commission_result( + factory.make_NodeResult_for_commissioning( node=node, name=LIST_MODALIASES_OUTPUT_NAME, script_result=0, data=data.encode("utf-8")) response = self.client.get(reverse('node-view', args=[node.system_id])) self.assertNotIn("Third Party Drivers", response.content) +class TestNodesViewSearch(MAASServerTestCase): + + def test_node_list_search_query_includes_current_clause(self): + self.client_log_in() + qs = factory.make_string() + response = self.client.get(reverse('node-list'), {"query": qs}) + query_value = fromstring(response.content).xpath( + "string(//div[@id='nodes']//input[@name='query']/@value)") + self.assertIn(qs, query_value) + + def test_node_list_search_query_finds_by_all_fields_hostname(self): + self.client_log_in() + [factory.make_Node() for i in range(10)] + hostname = factory.make_name("hostname") + node = factory.make_Node(hostname=hostname) + node2 = factory.make_Node(hostname=hostname[:-1]) + node_link = reverse('node-view', args=[node.system_id]) + node2_link = reverse('node-view', args=[node2.system_id]) + response = self.client.get(reverse('node-list'), {"query": hostname}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.expectThat(node_links, Equals([node_link])) + + # A substring search also matches. Here it also picks up the + # other node with the exact name match. 
+ response = self.client.get( + reverse('node-list'), {"query": hostname[:-1]}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertItemsEqual(node_links, [node_link, node2_link]) + + def test_node_list_search_query_finds_by_field_hostname(self): + self.client_log_in() + [factory.make_Node() for i in range(10)] + hostname = factory.make_name("hostname") + node = factory.make_Node(hostname=hostname) + node2 = factory.make_Node(hostname=hostname[:-1]) + node_link = reverse('node-view', args=[node.system_id]) + node2_link = reverse('node-view', args=[node2.system_id]) + response = self.client.get(reverse('node-list'), {"query": hostname}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.expectThat(node_links, Equals([node_link])) + + # A substring search also matches. Here it also picks up the + # other node with the exact name match. 
+ response = self.client.get( + reverse('node-list'), {"query": "hostname:%s" % hostname[:-1]}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertItemsEqual(node_links, [node_link, node2_link]) + + def test_node_list_search_query_finds_by_all_fields_arch(self): + self.client_log_in() + amd64_node = factory.make_Node(architecture="amd64/generic") + factory.make_Node(architecture="i386/generic") + factory.make_Node(architecture="armhf/generic") + + node_link = reverse('node-view', args=[amd64_node.system_id]) + response = self.client.get( + reverse('node-list'), {"query": "amd64"}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertItemsEqual(node_links, [node_link]) + + def test_node_list_search_query_finds_by_field_arch(self): + self.client_log_in() + amd64_node = factory.make_Node(architecture="amd64/generic") + factory.make_Node(architecture="i386/generic") + factory.make_Node(architecture="armhf/generic") + + node_link = reverse('node-view', args=[amd64_node.system_id]) + response = self.client.get( + reverse('node-list'), {"query": "arch:amd64"}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertItemsEqual(node_links, [node_link]) + + def test_node_list_search_query_finds_by_field_power_state(self): + self.client_log_in() + power_on_node = factory.make_Node(power_state=POWER_STATE.ON) + factory.make_Node(power_state=POWER_STATE.OFF) + factory.make_Node(power_state=POWER_STATE.UNKNOWN) + + node_link = reverse('node-view', args=[power_on_node.system_id]) + response = self.client.get( + reverse('node-list'), {"query": "power:on"}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertItemsEqual(node_links, [node_link]) + + 
def test_node_list_search_query_finds_by_all_fields_full_mac(self): + self.client_log_in() + node = factory.make_Node(mac=True) + factory.make_Node(mac=True) + factory.make_Node(mac=True) + + node_link = reverse('node-view', args=[node.system_id]) + response = self.client.get( + reverse('node-list'), {"query": '%s' % node.get_pxe_mac()}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertItemsEqual(node_links, [node_link]) + + def test_node_list_search_query_finds_by_all_fields_partial_mac(self): + self.client_log_in() + node = factory.make_Node(mac=True) + factory.make_Node(mac=True) + factory.make_Node(mac=True) + + node_link = reverse('node-view', args=[node.system_id]) + mac = '%s' % node.get_pxe_mac() + response = self.client.get( + reverse('node-list'), {"query": mac[:8]}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertItemsEqual(node_links, [node_link]) + + def test_node_list_search_query_finds_by_field_mac_partial_mac(self): + self.client_log_in() + node = factory.make_Node(mac=True) + factory.make_Node(mac=True) + factory.make_Node(mac=True) + + node_link = reverse('node-view', args=[node.system_id]) + mac = '%s' % node.get_pxe_mac() + response = self.client.get( + reverse('node-list'), {"query": 'mac:%s' % mac[:5]}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertItemsEqual(node_links, [node_link]) + + def test_node_list_search_query_finds_by_all_fields_status(self): + self.client_log_in() + node = factory.make_Node(status=NODE_STATUS.ALLOCATED) + factory.make_Node(status=NODE_STATUS.DEPLOYING) + factory.make_Node(status=NODE_STATUS.DEPLOYED) + + node_link = reverse('node-view', args=[node.system_id]) + response = self.client.get( + reverse('node-list'), + {"query": 
NODE_STATUS_CHOICES_DICT[NODE_STATUS.ALLOCATED]}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertItemsEqual(node_links, [node_link]) + + def test_node_list_search_query_finds_by_field_status(self): + self.client_log_in() + node = factory.make_Node(status=NODE_STATUS.DEPLOYED) + factory.make_Node(status=NODE_STATUS.DEPLOYING) + factory.make_Node(status=NODE_STATUS.ALLOCATED) + + node_link = reverse('node-view', args=[node.system_id]) + status_text = NODE_STATUS_CHOICES_DICT[NODE_STATUS.DEPLOYED] + response = self.client.get( + reverse('node-list'), + {"query": 'status:%s' % status_text}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertItemsEqual(node_links, [node_link]) + + def test_node_list_search_query_finds_by_field_status_patial_deploy(self): + self.client_log_in() + node1 = factory.make_Node(status=NODE_STATUS.DEPLOYED) + node2 = factory.make_Node(status=NODE_STATUS.DEPLOYING) + factory.make_Node(status=NODE_STATUS.ALLOCATED) + + node1_link = reverse('node-view', args=[node1.system_id]) + node2_link = reverse('node-view', args=[node2.system_id]) + response = self.client.get( + reverse('node-list'), + {"query": 'status:deploy'}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertItemsEqual(node_links, [node1_link, node2_link]) + + def test_node_list_search_query_finds_by_all_fields_for_miss_field(self): + self.client_log_in() + node1 = factory.make_Node(status=NODE_STATUS.DEPLOYED) + node2 = factory.make_Node(status=NODE_STATUS.DEPLOYING) + factory.make_Node(status=NODE_STATUS.ALLOCATED) + + node1_link = reverse('node-view', args=[node1.system_id]) + node2_link = reverse('node-view', args=[node2.system_id]) + response = self.client.get( + reverse('node-list'), + {"query": ':deploy'}) + document = 
fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertItemsEqual(node_links, [node1_link, node2_link]) + + def test_node_list_search_query_returns_empty_for_missing_field(self): + self.client_log_in() + for _ in range(3): + factory.make_Node() + response = self.client.get( + reverse('node-list'), + {"query": ':deploy'}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertItemsEqual(node_links, []) + + def test_node_list_search_doesnt_show_duplicates(self): + # Unioning several query sets can result in dupes, check that + # the query uses distinct. + self.client_log_in() + node = factory.make_Node( + hostname="arthur", architecture="amd64/generic") + node.tags = [factory.make_Tag() for i in range(3)] + node.save() + nodes = [node] + nodes.append(factory.make_Node( + hostname="andrew", architecture="i386/generic")) + response = self.client.get(reverse('node-list'), {"query": "a"}) + document = fromstring(response.content) + expected_node_links = [ + reverse('node-view', args=[n.system_id]) for n in nodes] + doc_node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertItemsEqual(expected_node_links, doc_node_links) + + def test_node_list_search_query_finds_by_tags(self): + self.client_log_in() + nodes = [factory.make_Node() for i in range(10)] + tag = factory.make_Tag("odd") + for node in nodes[::2]: + node.tags = [tag] + response = self.client.get( + reverse('node-list'), {"query": "odd"}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + expected_node_links = [ + reverse('node-view', args=[node.system_id]) + for node in nodes[::2]] + self.expectThat(node_links, ContainsAll(expected_node_links)) + + # Substrings match tags too. 
+ response = self.client.get( + reverse('node-list'), {"query": "od"}) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.expectThat(node_links, ContainsAll(expected_node_links)) + + def test_node_list_search_query_paginates(self): + """Node list query subset is split across multiple pages with links""" + self.client_log_in() + # Set a very small page size to save creating lots of nodes + self.patch(nodes_views.NodeListView, 'paginate_by', 2) + nodes = [ + factory.make_Node(created="2012-10-12 12:00:%02d" % i) + for i in range(10)] + tag = factory.make_Tag("odd") + for node in nodes[::2]: + node.tags = [tag] + last_node_link = reverse('node-view', args=[nodes[0].system_id]) + response = self.client.get( + reverse('node-list'), {"query": "odd", "page": 3}) + document = fromstring(response.content) + self.expectThat( + document.xpath("string(//h1)"), Contains("5 matching nodes")) + self.expectThat( + [last_node_link], + Equals( + document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href"))) + self.assertEqual( + [ + ("first", "?query=odd"), + ("previous", "?query=odd&page=2") + ], + [ + (a.text.lower(), a.get("href")) + for a in document.xpath("//div[@class='pagination']//a") + ]) + + def test_node_list_query_constraint_includes_current(self): + self.client_log_in() + qs = factory.make_string() + response = self.client.get(reverse('node-list'), {"query": qs}) + query_value = fromstring(response.content).xpath( + "string(//div[@id='nodes']//input[@name='query']/@value)") + self.assertIn(qs, query_value) + + def test_node_list_query_constraint_error_on_missing_tag(self): + self.client_log_in() + response = self.client.get( + reverse('node-list'), {"query": "maas-tags=missing"}) + error_string = fromstring(response.content).xpath( + "string(//div[@id='nodes']//p[@class='form-errors'])") + self.assertIn("No such tag(s): 'missing'", error_string) + + def 
test_node_list_query_constraint_error_on_unknown_constraint(self): + self.client_log_in() + response = self.client.get( + reverse('node-list'), {"query": "color=red"}) + error_string = fromstring(response.content).xpath( + "string(//div[@id='nodes']//p[@class='form-errors'])") + self.assertEqual("color: No such constraint.", error_string.strip()) + + def test_node_list_query_constraint_selects_subset(self): + self.client_log_in() + tag = factory.make_Tag("shiny") + node1 = factory.make_Node(cpu_count=1) + node2 = factory.make_Node(cpu_count=2) + node3 = factory.make_Node(cpu_count=2) + node1.tags = [tag] + node2.tags = [tag] + node3.tags = [] + response = self.client.get( + reverse('node-list'), {"query": "maas-tags=shiny cpu=2"}) + node2_link = reverse('node-view', args=[node2.system_id]) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertEqual([node2_link], node_links) + + def test_node_list_query_constraint_selects_subset_ignoring_others(self): + self.client_log_in() + tag = factory.make_Tag("shiny") + node1 = factory.make_Node(cpu_count=1) + node2 = factory.make_Node(cpu_count=2) + node3 = factory.make_Node(cpu_count=2) + node1.tags = [tag] + node2.tags = [tag] + node3.tags = [] + response = self.client.get( + reverse('node-list'), + {"query": "maas-tags=shiny cpu=2 status:new"}) + node2_link = reverse('node-view', args=[node2.system_id]) + document = fromstring(response.content) + node_links = document.xpath( + "//div[@id='nodes']/form/table/tr/td[3]/a/@href") + self.assertEqual([node2_link], node_links) + + def test_node_list_query_constraint_paginates(self): + """Node list query subset is split across multiple pages with links""" + self.client_log_in() + # Set a very small page size to save creating lots of nodes + self.patch(nodes_views.NodeListView, 'paginate_by', 2) + nodes = [ + factory.make_Node(created="2012-10-12 12:00:%02d" % i) + for i in range(10)] + tag = 
factory.make_Tag("odd") + for node in nodes[::2]: + node.tags = [tag] + last_node_link = reverse('node-view', args=[nodes[0].system_id]) + response = self.client.get( + reverse('node-list'), + {"query": "maas-tags=odd", "page": 3}) + document = fromstring(response.content) + self.assertIn("5 matching nodes", document.xpath("string(//h1)")) + self.assertEqual( + [last_node_link], + document.xpath("//div[@id='nodes']/form/table/tr/td[3]/a/@href")) + self.assertEqual( + [ + ("first", "?query=maas-tags%3Dodd"), + ("previous", "?query=maas-tags%3Dodd&page=2") + ], + [ + (a.text.lower(), a.get("href")) + for a in document.xpath("//div[@class='pagination']//a") + ]) + + +class TestWarnUnconfiguredIPAddresses(MAASServerTestCase): + + def test__warns_for_IPv6_address_on_non_ubuntu_OS(self): + network = factory.make_ipv6_network() + osystem = choice(['windows', 'centos', 'suse']) + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + osystem=osystem, network=network) + factory.make_StaticIPAddress( + ip=factory.pick_ip_in_network(network), mac=node.get_primary_mac()) + self.assertTrue(NodeView().warn_unconfigured_ip_addresses(node)) + + def test__warns_for_IPv6_address_on_debian_installer(self): + network = factory.make_ipv6_network() + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + osystem='ubuntu', network=network, boot_type=NODE_BOOT.DEBIAN) + factory.make_StaticIPAddress( + ip=factory.pick_ip_in_network(network), mac=node.get_primary_mac()) + self.assertTrue(NodeView().warn_unconfigured_ip_addresses(node)) + + def test__does_not_warn_for_ubuntu_fast_installer(self): + network = factory.make_ipv6_network() + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + osystem='ubuntu', network=network, boot_type=NODE_BOOT.FASTPATH) + factory.make_StaticIPAddress( + ip=factory.pick_ip_in_network(network), mac=node.get_primary_mac()) + self.assertFalse(NodeView().warn_unconfigured_ip_addresses(node)) + + def 
test__does_not_warn_for_default_ubuntu_with_fast_installer(self): + network = factory.make_ipv6_network() + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + osystem='ubuntu', network=network, boot_type=NODE_BOOT.FASTPATH) + node.osystem = '' + node.save() + factory.make_StaticIPAddress( + ip=factory.pick_ip_in_network(network), mac=node.get_primary_mac()) + self.assertFalse(NodeView().warn_unconfigured_ip_addresses(node)) + + def test__does_not_warn_for_just_IPv4_address(self): + network = factory.make_ipv4_network() + osystem = choice(['windows', 'centos', 'suse']) + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + osystem=osystem, network=network) + factory.make_StaticIPAddress( + ip=factory.pick_ip_in_network(network), mac=node.get_primary_mac()) + self.assertFalse(NodeView().warn_unconfigured_ip_addresses(node)) + + def test__does_not_warn_without_static_address(self): + osystem = choice(['windows', 'centos', 'suse']) + node = factory.make_node_with_mac_attached_to_nodegroupinterface( + osystem=osystem) + self.assertFalse(NodeView().warn_unconfigured_ip_addresses(node)) + + +class NodeEventLogTest(MAASServerTestCase): + + def test_event_log_shows_event_list(self): + self.client_log_in() + node = factory.make_Node() + events = [ + factory.make_Event(node=node) + for _ in range(NodeView.number_of_events_shown) + ] + response = self.client.get( + reverse('node-event-list-view', args=[node.system_id])) + document = fromstring(response.content) + events_displayed = document.xpath( + "//div[@id='node_event_list']//td[@class='event_description']") + self.assertItemsEqual( + [ + event.type.description + ' \u2014 ' + event.description + for event in events + ], + [ + normalize_text(display.text_content()) + for display in events_displayed + ] + ) + + def test_event_log_is_paginated(self): + self.client_log_in() + self.patch(NodeEventListView, "paginate_by", 3) + node = factory.make_Node() + # Create 4 events. 
+ [factory.make_Event(node=node) for _ in range(4)] + + response = self.client.get( + reverse('node-event-list-view', args=[node.system_id])) + self.assertEqual(httplib.OK, response.status_code) + doc = fromstring(response.content) + self.assertEqual( + 1, len(doc.cssselect('div.pagination')), + "Couldn't find pagination tag.") + + class ConstructThirdPartyDriversNoticeTest(MAASServerTestCase): def test_constructs_notice_without_link_for_normal_users(self): @@ -1157,10 +1928,12 @@ nodes_views.construct_third_party_drivers_notice(True).strip()) -class NodeCommissionResultsDisplayTest(MAASServerTestCase): - """Tests for the link to node commissioning results on the Node page.""" +class NodeResultsDisplayTest(MAASServerTestCase): + """Tests for the link to node commissioning/installation + results on the Node page. + """ - def request_results_display(self, node): + def request_results_display(self, node, result_type): """Request the page for `node`, and extract the results display. Fails if generating, loading or parsing the page failed; or if @@ -1174,7 +1947,10 @@ response = self.client.get(node_link) self.assertEqual(httplib.OK, response.status_code, response.content) doc = fromstring(response.content) - results_display = doc.cssselect('#nodecommissionresults') + if result_type == RESULT_TYPE.COMMISSIONING: + results_display = doc.cssselect('#nodecommissionresults') + elif result_type == RESULT_TYPE.INSTALLATION: + results_display = doc.cssselect('#nodeinstallresults') if len(results_display) == 0: return None elif len(results_display) == 1: @@ -1182,7 +1958,7 @@ else: self.fail("Found more than one matching tag: %s" % results_display) - def get_results_link(self, display): + def get_commissioning_results_link(self, display): """Find the results link in `display`. 
:param display: Results display section for a node, as returned by @@ -1199,15 +1975,29 @@ else: self.fail("Found more than one link: %s" % links) - def normalise_whitespace(self, text): - """Return a version of `text` where all whitespace is single spaces.""" - return ' '.join(text.split()) + def get_installation_results_link(self, display): + """Find the results link in `display`. + + :param display: Results display section for a node, as returned by + `request_results_display`. + :return: `lxml.html.HtmlElement` for the link to the node's + installation results, as found in `display`; or `None` if it was + not present. + """ + links = display.cssselect('a') + if len(links) == 0: + return None + elif len(links) == 1: + return links[0] + elif len(links) > 1: + return links def test_view_node_links_to_commissioning_results_if_appropriate(self): self.client_log_in(as_admin=True) - result = factory.make_node_commission_result() - section = self.request_results_display(result.node) - link = self.get_results_link(section) + result = factory.make_NodeResult_for_commissioning() + section = self.request_results_display( + result.node, RESULT_TYPE.COMMISSIONING) + link = self.get_commissioning_results_link(section) results_list = reverse('nodecommissionresult-list') self.assertEqual( results_list + '?node=%s' % result.node.system_id, @@ -1215,38 +2005,333 @@ def test_view_node_shows_commissioning_results_only_if_present(self): self.client_log_in(as_admin=True) - node = factory.make_node() - self.assertIsNone(self.request_results_display(node)) + node = factory.make_Node() + self.assertIsNone( + self.request_results_display(node, RESULT_TYPE.COMMISSIONING)) + + def test_view_node_shows_commissioning_results_with_edit_perm(self): + password = 'test' + user = factory.make_User(password=password) + node = factory.make_Node(owner=user) + self.client.login(username=user.username, password=password) + self.logged_in_user = user + result = 
factory.make_NodeResult_for_commissioning(node=node) + section = self.request_results_display( + result.node, RESULT_TYPE.COMMISSIONING) + link = self.get_commissioning_results_link(section) + self.assertEqual( + "1 output file", + normalise_whitespace(link.text_content())) - def test_view_node_shows_commissioning_results_only_to_superuser(self): - self.client_log_in(as_admin=False) - result = factory.make_node_commission_result() - self.assertIsNone(self.request_results_display(result.node)) + def test_view_node_shows_commissioning_results_requires_edit_perm(self): + password = 'test' + user = factory.make_User(password=password) + node = factory.make_Node() + self.client.login(username=user.username, password=password) + self.logged_in_user = user + result = factory.make_NodeResult_for_commissioning(node=node) + self.assertIsNone( + self.request_results_display( + result.node, RESULT_TYPE.COMMISSIONING)) def test_view_node_shows_single_commissioning_result(self): self.client_log_in(as_admin=True) - result = factory.make_node_commission_result() - section = self.request_results_display(result.node) - link = self.get_results_link(section) + result = factory.make_NodeResult_for_commissioning() + section = self.request_results_display( + result.node, RESULT_TYPE.COMMISSIONING) + link = self.get_commissioning_results_link(section) self.assertEqual( "1 output file", - self.normalise_whitespace(link.text_content())) + normalise_whitespace(link.text_content())) def test_view_node_shows_multiple_commissioning_results(self): self.client_log_in(as_admin=True) - node = factory.make_node() + node = factory.make_Node() num_results = randint(2, 5) for _ in range(num_results): - factory.make_node_commission_result(node=node) - section = self.request_results_display(node) - link = self.get_results_link(section) + factory.make_NodeResult_for_commissioning(node=node) + section = self.request_results_display( + node, RESULT_TYPE.COMMISSIONING) + link = 
self.get_commissioning_results_link(section) self.assertEqual( "%d output files" % num_results, - self.normalise_whitespace(link.text_content())) + normalise_whitespace(link.text_content())) + + def test_view_node_shows_installation_results_only_if_present(self): + self.client_log_in(as_admin=True) + node = factory.make_Node() + self.assertIsNone( + self.request_results_display(node, RESULT_TYPE.INSTALLATION)) + + def test_view_node_shows_installation_results_with_edit_perm(self): + password = 'test' + user = factory.make_User(password=password) + node = factory.make_Node(owner=user) + self.client.login(username=user.username, password=password) + self.logged_in_user = user + result = factory.make_NodeResult_for_installation(node=node) + section = self.request_results_display( + result.node, RESULT_TYPE.INSTALLATION) + link = self.get_installation_results_link(section) + self.assertNotIn( + normalise_whitespace(link.text_content()), + ('', None)) + + def test_view_node_shows_installation_results_requires_edit_perm(self): + password = 'test' + user = factory.make_User(password=password) + node = factory.make_Node() + self.client.login(username=user.username, password=password) + self.logged_in_user = user + result = factory.make_NodeResult_for_installation(node=node) + self.assertIsNone( + self.request_results_display( + result.node, RESULT_TYPE.INSTALLATION)) + + def test_view_node_shows_single_installation_result(self): + self.client_log_in(as_admin=True) + result = factory.make_NodeResult_for_installation() + section = self.request_results_display( + result.node, RESULT_TYPE.INSTALLATION) + link = self.get_installation_results_link(section) + self.assertEqual( + "install log", + normalise_whitespace(link.text_content())) + + def test_view_node_shows_multiple_installation_results(self): + self.client_log_in(as_admin=True) + node = factory.make_Node() + num_results = randint(2, 5) + results_names = [] + for _ in range(num_results): + node_result = 
factory.make_NodeResult_for_installation(node=node) + results_names.append(node_result.name) + section = self.request_results_display( + node, RESULT_TYPE.INSTALLATION) + links = self.get_installation_results_link(section) + self.assertThat( + results_names, + ContainsAll( + [normalise_whitespace(link.text_content()) for link in links])) + + +class NodeListingJSReloader(SeleniumTestCase): + + @classmethod + def setUpClass(cls): + raise SkipTest( + "XXX: Gavin Panella 2015-02-26 bug=1426010: " + "All tests using Selenium are breaking.") + + # JS Script that will load a new NodeTableReloader view, placing the + # object on the window. + RELOADER_SCRIPT = dedent("""\ + YUI().use( + 'maas.node_views', 'maas.shortpoll', + function (Y) { + // Place the reloader on the window, giving the ability for + // selenium to access the view. + window.reloader_view = new Y.maas.node_views.NodesTableReloader({ + srcNode: '#node_list'}); + + // Start the poller so it makes the request to retrieve the + // node data. + var ids = window.reloader_view.getNodesList(); + var op_url = ""; + Y.Array.each(ids, function(id) { + op_url += "&id=" + id; + }); + if (op_url.length > 0) { + op_url = "?" + op_url.substr(1); + } + var poller = new Y.maas.shortpoll.ShortPollManager({ + uri: "%s" + op_url + }); + window.reloader_view.addLoader(poller.get("io")); + poller.poll(); + }); + """) + + def get_nodes(self): + """Return the loaded nodes from JS NodesTableReloader.""" + self.get_page('node-list') + + # We execute a script to create a new reloader view. This needs to + # be done to get access to the view. As the view code does not place + # the object on a global variable, which is a good thing. + self.selenium.execute_script( + self.RELOADER_SCRIPT % reverse('node-list')) + + # Extract the loaded nodes list from javascript, to check that + # it loads the correct information. 
Due to the nature of JS and the + # poller requesting the nodes, we cannot assume that the result + # will be their immediately. We will try for a maximum of + # 5 seconds before giving up. + for _ in range(10): + js_nodes = self.selenium.execute_script( + "return window.reloader_view.nodes;") + if js_nodes is not None: + break + time.sleep(0.5) + if js_nodes is None: + self.fail("Unable to retrieve the loaded nodes from selenium.") + return js_nodes + + def test_node_table_reloader_loads_nodes(self): + self.log_in() + js_nodes = self.get_nodes() + self.assertEquals( + Node.objects.count(), + len(js_nodes), + "NodesTableReloader didn't load all the nodes.") + + def test_node_table_reloader_loads_node_with_correct_attributes(self): + self.log_in() + js_nodes = self.get_nodes() + for node in js_nodes: + self.expectThat(node, ContainsAll([ + 'id', + 'system_id', + 'url', + 'hostname', + 'fqdn', + 'status', + 'owner', + 'cpu_count', + 'memory', + 'storage', + 'power_state', + 'zone', + 'zone_url', + 'mac', + 'vendor', + 'macs', + ])) + + +class TestJSNodeView(SeleniumTestCase): + + @classmethod + def setUpClass(cls): + raise SkipTest( + "XXX: Gavin Panella 2015-02-26 bug=1426010: " + "All tests using Selenium are breaking.") + + # JS Script that will load a new NodeView, placing the + # object on the global window. + VIEW_SCRIPT = dedent("""\ + YUI().use( + 'maas.node', 'maas.node_views', 'maas.shortpoll', + function (Y) { + Y.on('domready', function() { + // Place the view on the window, giving the ability for + // selenium to access it. 
+ window.node_view = new Y.maas.node_views.NodeView({ + srcNode: 'body', + eventList: '#node_event_list', + actionView: '#sidebar' + }); + var poller = new Y.maas.shortpoll.ShortPollManager({ + uri: "%s" + }); + window.node_view.addLoader(poller.get("io")); + poller.poll(); + }); + }); + """) + + def get_js_node(self, node): + """Return the loaded node from JS NodeView.""" + self.get_page('node-view', args=[node.system_id]) + + # We execute a script to create a new view. This needs to + # be done to get access to the view. As the view code does not place + # the object on a global variable, which is a good thing. + self.selenium.execute_script( + self.VIEW_SCRIPT % reverse('node-view', args=[node.system_id])) + + # Extract the load node from javascript, to check that it loads the + # correct information. Due to the nature of JS and the + # poller requesting the node, we cannot assume that the result + # will be their immediately. We will try for a maximum of + # 5 seconds before giving up. 
+ for _ in range(10): + js_node = self.selenium.execute_script( + "return window.node_view.node;") + if js_node is not None: + break + time.sleep(0.5) + if js_node is None: + self.fail("Unable to retrieve the loaded node from selenium.") + return js_node + + def make_node_with_events(self): + node = factory.make_Node() + [factory.make_Event(node=node) for _ in range(3)] + return node + + def test_node_view_loads_node_with_correct_attributes(self): + self.log_in() + with transaction.atomic(): + node = self.make_node_with_events() + js_node = self.get_js_node(node) + self.expectThat(js_node, ContainsAll([ + 'id', + 'system_id', + 'url', + 'hostname', + 'architecture', + 'fqdn', + 'status', + 'owner', + 'cpu_count', + 'memory', + 'storage', + 'power_state', + 'zone', + 'zone_url', + 'mac', + 'vendor', + 'macs', + 'events', + ])) + + def test_node_view_loads_node_with_events(self): + self.log_in() + with transaction.atomic(): + node = self.make_node_with_events() + all_node_events = Event.objects.filter(node=node) + total_events = all_node_events.count() + viewable_events_count = ( + all_node_events.exclude(type__level=logging.DEBUG).count()) + js_node = self.get_js_node(node) + self.expectThat( + js_node['events']['total'], Equals(total_events)) + self.expectThat( + js_node['events']['count'], Equals(viewable_events_count)) + self.expectThat( + js_node['events']['more_url'], + Equals(reverse('node-event-list-view', args=[node.system_id]))) + for event in js_node['events']['events']: + self.expectThat(event, ContainsAll([ + 'id', + 'level', + 'created', + 'type', + 'description', + ])) class NodeListingSelectionJSControls(SeleniumTestCase): + @classmethod + def setUpClass(cls): + raise SkipTest( + "XXX: Gavin Panella 2015-02-26 bug=1426010: " + "All tests using Selenium are breaking.") + + @skip( + "XXX: blake_r 2014-10-02 bug=1376977: Causes intermittent failures") def test_node_list_js_control_select_all(self): self.log_in() self.get_page('node-list') @@ -1281,6 
+2366,12 @@ class NodeListingBulkActionSelectionTest(SeleniumTestCase): """Tests for JS event handling on the "bulk action" selection widget.""" + @classmethod + def setUpClass(cls): + raise SkipTest( + "XXX: Gavin Panella 2015-02-26 bug=1426010: " + "All tests using Selenium are breaking.") + def select_action(self, action_name): """Select the given node action.""" action_dropdown = self.selenium.find_element_by_id('id_action') @@ -1292,6 +2383,8 @@ if action.get_attribute('value') == action_name: action.click() + @skip( + "XXX: blake_r 2014-10-02 bug=1376977: Causes intermittent failures") def test_zone_widget_is_visible_only_when_set_zone_selected(self): self.log_in('admin') self.get_page('node-list') @@ -1308,9 +2401,15 @@ class NodeProbedDetailsExpanderTest(SeleniumTestCase): + @classmethod + def setUpClass(cls): + raise SkipTest( + "XXX: Gavin Panella 2015-02-26 bug=1426010: " + "All tests using Selenium are breaking.") + def make_node_with_lldp_output(self): - node = factory.make_node() - factory.make_node_commission_result( + node = factory.make_Node() + factory.make_NodeResult_for_commissioning( node=node, name=LLDP_OUTPUT_NAME, data="bar".encode("utf-8"), script_result=0) @@ -1337,7 +2436,9 @@ # Loading just once. Creating a second node in a separate test causes # an integrity error in the database; clearly that's not working too # well in a Selenium test case. - self.load_node_page(self.make_node_with_lldp_output()) + with transaction.atomic(): + node = self.make_node_with_lldp_output() + self.load_node_page(node) # The ProbedDetails output is in its hidden state. 
self.assertEqual( @@ -1398,10 +2499,13 @@ % Delete.display_bulk,), ), ] - for params, snippets in params_and_snippets: - message = message_from_form_stats(*params) + # level precedence is worst-case for the concantenation of messages + levels = ['error', 'info', 'error', 'error'] + for index, (params, snippets) in enumerate(params_and_snippets): + message, level = message_from_form_stats(*params) for snippet in snippets: self.assertIn(snippet, message) + self.assertEqual(level, levels[index]) class NodeEnlistmentPreseedViewTest(MAASServerTestCase): @@ -1430,18 +2534,27 @@ self.assertIn(message_chunk, response.content) -class NodePreseedViewTest(MAASServerTestCase): +class NodePreseedViewTest(PreseedRPCMixin, MAASServerTestCase): def test_preseedview_node_displays_preseed_data(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, owner=self.logged_in_user) + boot_image = make_rpc_boot_image(purpose='install') + self.patch( + preseed_module, 'get_boot_images_for').return_value = [boot_image] node_preseed_link = reverse('node-preseed-view', args=[node.system_id]) response = self.client.get(node_preseed_link) - self.assertIn(get_preseed(node), response.content) + escaped = html.escape(get_preseed(node)) + self.assertIn(escaped, response.content) def test_preseedview_node_catches_template_error(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, owner=self.logged_in_user) + boot_image = make_rpc_boot_image(purpose='install') + self.patch( + preseed_module, 'get_boot_images_for').return_value = [boot_image] node_preseed_link = reverse('node-preseed-view', args=[node.system_id]) path = self.make_file(name="generic", contents="{{invalid}}") self.patch( @@ -1451,18 +2564,24 @@ def test_preseedview_node_displays_message_if_commissioning(self): self.client_log_in() - node = factory.make_node( - 
owner=self.logged_in_user, status=NODE_STATUS.COMMISSIONING, + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, owner=self.logged_in_user, + status=NODE_STATUS.COMMISSIONING, ) node_preseed_link = reverse('node-preseed-view', args=[node.system_id]) response = self.client.get(node_preseed_link) + escaped = html.escape(get_preseed(node)) self.assertThat( response.content, - ContainsAll([get_preseed(node), "This node is commissioning."])) + ContainsAll([escaped, "This node is commissioning."])) def test_preseedview_node_displays_link_to_view_node(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) + node = factory.make_Node( + nodegroup=self.rpc_nodegroup, owner=self.logged_in_user) + boot_image = make_rpc_boot_image(purpose='install') + self.patch( + preseed_module, 'get_boot_images_for').return_value = [boot_image] node_preseed_link = reverse('node-preseed-view', args=[node.system_id]) response = self.client.get(node_preseed_link) node_link = reverse('node-view', args=[node.system_id]) @@ -1481,8 +2600,8 @@ # This returns a 404 rather than returning to the node page # with a nice error message because the node could not be found. self.client_log_in() - node_id = factory.getRandomString() - mac = factory.getRandomMACAddress() + node_id = factory.make_string() + mac = factory.make_mac_address() mac_delete_link = reverse('mac-delete', args=[node_id, mac]) response = self.client.get(mac_delete_link) self.assertEqual(httplib.NOT_FOUND, response.status_code) @@ -1491,8 +2610,8 @@ # If the MAC address does not exist, the user is redirected # to the node edit page. 
self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) - mac = factory.getRandomMACAddress() + node = factory.make_Node(owner=self.logged_in_user) + mac = factory.make_mac_address() mac_delete_link = reverse('mac-delete', args=[node.system_id, mac]) response = self.client.get(mac_delete_link) self.assertEqual( @@ -1501,16 +2620,16 @@ def test_node_delete_access_denied_if_user_cannot_edit_node(self): self.client_log_in() - node = factory.make_node(owner=factory.make_user()) - mac = factory.make_mac_address(node=node) + node = factory.make_Node(owner=factory.make_User()) + mac = factory.make_MACAddress(node=node) mac_delete_link = reverse('mac-delete', args=[node.system_id, mac]) response = self.client.get(mac_delete_link) self.assertEqual(httplib.FORBIDDEN, response.status_code) def test_node_delete_mac_contains_mac(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) - mac = factory.make_mac_address(node=node) + node = factory.make_Node(owner=self.logged_in_user) + mac = factory.make_MACAddress(node=node) mac_delete_link = reverse('mac-delete', args=[node.system_id, mac]) response = self.client.get(mac_delete_link) self.assertIn( @@ -1520,8 +2639,8 @@ def test_node_delete_mac_POST_deletes_mac(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) - mac = factory.make_mac_address(node=node) + node = factory.make_Node(owner=self.logged_in_user) + mac = factory.make_MACAddress(node=node) mac_delete_link = reverse('mac-delete', args=[node.system_id, mac]) response = self.client.post(mac_delete_link, {'post': 'yes'}) self.assertEqual( @@ -1531,8 +2650,8 @@ def test_node_delete_mac_POST_displays_message(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) - mac = factory.make_mac_address(node=node) + node = factory.make_Node(owner=self.logged_in_user) + mac = factory.make_MACAddress(node=node) mac_delete_link = reverse('mac-delete', args=[node.system_id, mac]) 
response = self.client.post(mac_delete_link, {'post': 'yes'}) redirect = extract_redirect(response) @@ -1543,9 +2662,9 @@ def test_node_delete_mac_POST_disconnects_MAC_from_network(self): self.client_log_in() - network = factory.make_network() - node = factory.make_node(owner=self.logged_in_user) - mac = factory.make_mac_address(node=node, networks=[network]) + network = factory.make_Network() + node = factory.make_Node(owner=self.logged_in_user) + mac = factory.make_MACAddress(node=node, networks=[network]) response = self.client.post( reverse('mac-delete', args=[node.system_id, mac]), {'post': 'yes'}) self.assertEqual(httplib.FOUND, response.status_code) @@ -1556,7 +2675,7 @@ def test_node_add_mac_contains_form(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) + node = factory.make_Node(owner=self.logged_in_user) mac_add_link = reverse('mac-add', args=[node.system_id]) response = self.client.get(mac_add_link) doc = fromstring(response.content) @@ -1564,9 +2683,9 @@ def test_node_add_mac_POST_adds_mac(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) + node = factory.make_Node(owner=self.logged_in_user) mac_add_link = reverse('mac-add', args=[node.system_id]) - mac = factory.getRandomMACAddress() + mac = factory.make_mac_address() response = self.client.post(mac_add_link, {'mac_address': mac}) self.assertEqual( reverse('node-edit', args=[node.system_id]), @@ -1576,9 +2695,9 @@ def test_node_add_mac_POST_displays_message(self): self.client_log_in() - node = factory.make_node(owner=self.logged_in_user) + node = factory.make_Node(owner=self.logged_in_user) mac_add_link = reverse('mac-add', args=[node.system_id]) - mac = factory.getRandomMACAddress() + mac = factory.make_mac_address() response = self.client.post(mac_add_link, {'mac_address': mac}) redirect = extract_redirect(response) response = self.client.get(redirect) @@ -1591,11 +2710,11 @@ def test_admin_can_edit_nodes(self): 
self.client_log_in(as_admin=True) - node = factory.make_node(owner=factory.make_user()) + node = factory.make_Node(owner=factory.make_User()) node_edit_link = reverse('node-edit', args=[node.system_id]) params = { - 'hostname': factory.getRandomString(), - 'power_type': factory.getRandomPowerType(), + 'hostname': factory.make_string(), + 'power_type': factory.pick_power_type(), 'architecture': make_usable_architecture(self), } response = self.client.post(node_edit_link, params) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_prefs.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_prefs.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_prefs.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_prefs.py 2015-07-10 01:27:14.000000000 +0000 @@ -54,7 +54,7 @@ self.client_log_in() user = self.logged_in_user # Create a few tokens. - for i in range(3): + for _ in range(3): user.get_profile().create_authorisation_token() response = self.client.get('/account/prefs/') doc = fromstring(response.content) @@ -170,7 +170,7 @@ def test_key_can_be_added_if_same_key_already_setup_for_other_user(self): self.client_log_in() key_string = get_data('data/test_rsa0.pub') - key = SSHKey(user=factory.make_user(), key=key_string) + key = SSHKey(user=factory.make_User(), key=key_string) key.save() response = self.client.post( reverse('prefs-add-sshkey'), {'key': key_string}) @@ -183,7 +183,7 @@ def test_delete_key_GET(self): # The 'Delete key' page displays a confirmation page with a form. 
self.client_log_in() - key = factory.make_sshkey(self.logged_in_user) + key = factory.make_SSHKey(self.logged_in_user) del_link = reverse('prefs-delete-sshkey', args=[key.id]) response = self.client.get(del_link) doc = fromstring(response.content) @@ -199,7 +199,7 @@ def test_delete_key_GET_cannot_access_someone_elses_key(self): self.client_log_in() - key = factory.make_sshkey(factory.make_user()) + key = factory.make_SSHKey(factory.make_User()) del_link = reverse('prefs-delete-sshkey', args=[key.id]) response = self.client.get(del_link) @@ -209,7 +209,7 @@ # Deleting a nonexistent key requires no confirmation. It just # "succeeds" instantaneously. self.client_log_in() - key = factory.make_sshkey(self.logged_in_user) + key = factory.make_SSHKey(self.logged_in_user) del_link = reverse('prefs-delete-sshkey', args=[key.id]) key.delete() response = self.client.get(del_link) @@ -218,7 +218,7 @@ def test_delete_key_POST(self): # A POST request deletes the key, and redirects to the prefs. self.client_log_in() - key = factory.make_sshkey(self.logged_in_user) + key = factory.make_SSHKey(self.logged_in_user) del_link = reverse('prefs-delete-sshkey', args=[key.id]) response = self.client.post(del_link, {'post': 'yes'}) @@ -229,7 +229,7 @@ # Deleting a key that's already been deleted? Basically that's # success. 
self.client_log_in() - key = factory.make_sshkey(self.logged_in_user) + key = factory.make_SSHKey(self.logged_in_user) del_link = reverse('prefs-delete-sshkey', args=[key.id]) key.delete() response = self.client.post(del_link, {'post': 'yes'}) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_rpc.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_rpc.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_rpc.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_rpc.py 2015-07-10 01:27:14.000000000 +0000 @@ -19,8 +19,10 @@ from crochet import run_in_reactor from django.core.urlresolvers import reverse from maasserver import eventloop +from maasserver.testing.eventloop import RegionEventLoopFixture from maasserver.testing.testcase import MAASServerTestCase -from provisioningserver.utils import get_all_interface_addresses +from netaddr import IPAddress +from provisioningserver.utils.network import get_all_interface_addresses from testtools.matchers import ( Equals, GreaterThan, @@ -28,8 +30,8 @@ KeysEqual, LessThan, MatchesAll, - MatchesDict, MatchesListwise, + MatchesSetwise, ) from twisted.internet.defer import inlineCallbacks from twisted.internet.threads import deferToThread @@ -41,15 +43,27 @@ class RPCViewTest(MAASServerTestCase): - def test_rpc_info(self): + def test_rpc_info_when_rpc_advertise_not_present(self): + getServiceNamed = self.patch_autospec( + eventloop.services, "getServiceNamed") + getServiceNamed.side_effect = KeyError + + response = self.client.get(reverse('rpc-info')) + self.assertEqual("application/json", response["Content-Type"]) + info = json.loads(response.content) + self.assertEqual({"eventloops": None}, info) + + def test_rpc_info_when_rpc_advertise_not_running(self): response = self.client.get(reverse('rpc-info')) self.assertEqual("application/json", response["Content-Type"]) info = json.loads(response.content) - self.assertEqual({"eventloops": {}}, info) + 
self.assertEqual({"eventloops": None}, info) + + def test_rpc_info_when_rpc_advertise_running(self): + self.useFixture(RegionEventLoopFixture("rpc", "rpc-advertise")) - def test_rpc_info_when_rpc_running(self): eventloop.start().wait(5) - self.addCleanup(lambda: eventloop.stop().wait(5)) + self.addCleanup(lambda: eventloop.reset().wait(5)) getServiceNamed = eventloop.services.getServiceNamed @@ -69,12 +83,15 @@ self.assertEqual("application/json", response["Content-Type"]) info = json.loads(response.content) self.assertThat(info, KeysEqual("eventloops")) - self.assertThat(info["eventloops"], MatchesDict({ + self.assertThat(info["eventloops"], KeysEqual(eventloop.loop.name)) + self.assertThat( + info["eventloops"][eventloop.loop.name], # Each entry in the endpoints dict is a mapping from an # event loop to a list of (host, port) tuples. Each tuple is # a potential endpoint for connecting into that event loop. - eventloop.loop.name: MatchesListwise([ + MatchesSetwise(*[ MatchesListwise((Equals(addr), is_valid_port)) for addr in get_all_interface_addresses() - ]), - })) + if not IPAddress(addr).is_link_local() + and IPAddress(addr).version == 4 + ])) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_settings_commissioning_scripts.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_settings_commissioning_scripts.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_settings_commissioning_scripts.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_settings_commissioning_scripts.py 2015-07-10 01:27:14.000000000 +0000 @@ -39,8 +39,8 @@ def test_settings_contains_names_and_content_of_scripts(self): self.client_log_in(as_admin=True) scripts = { - factory.make_commissioning_script(), - factory.make_commissioning_script(), + factory.make_CommissioningScript(), + factory.make_CommissioningScript(), } response = self.client.get(reverse('settings')) names = [script.name for script in scripts] @@ -56,8 +56,8 @@ def 
test_settings_contains_links_to_delete_scripts(self): self.client_log_in(as_admin=True) scripts = { - factory.make_commissioning_script(), - factory.make_commissioning_script(), + factory.make_CommissioningScript(), + factory.make_CommissioningScript(), } links = get_content_links(self.client.get(reverse('settings'))) script_delete_links = [ @@ -80,7 +80,7 @@ def test_can_delete_commissioning_script(self): self.client_log_in(as_admin=True) - script = factory.make_commissioning_script() + script = factory.make_CommissioningScript() delete_link = reverse('commissioning-script-delete', args=[script.id]) response = self.client.post(delete_link, {'post': 'yes'}) self.assertEqual( @@ -94,7 +94,7 @@ def test_can_create_commissioning_script(self): self.client_log_in(as_admin=True) - content = factory.getRandomString() + content = factory.make_string() name = factory.make_name('filename') create_link = reverse('commissioning-script-add') filepath = self.make_file(name=name, contents=content) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_settings_license_keys.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_settings_license_keys.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_settings_license_keys.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_settings_license_keys.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,184 @@ +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Test maasserver license key settings views.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import httplib + +from django.core.urlresolvers import reverse +from lxml.html import fromstring +from maasserver import forms +from maasserver.clusterrpc.testing.osystems import ( + make_rpc_osystem, + make_rpc_release, + ) +from maasserver.models import LicenseKey +from maasserver.testing import ( + extract_redirect, + get_content_links, + ) +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.osystems import patch_usable_osystems +from maasserver.testing.testcase import MAASServerTestCase +from maasserver.views import settings as settings_view +from maasserver.views.settings_license_keys import LICENSE_KEY_ANCHOR +from testtools.matchers import ContainsAll + + +def make_osystem_requiring_license_key(osystem=None, distro_series=None): + if osystem is None: + osystem = factory.make_name('osystem') + if distro_series is None: + distro_series = factory.make_name('distro_series') + rpc_release = make_rpc_release( + distro_series, requires_license_key=True) + rpc_osystem = make_rpc_osystem(osystem, releases=[rpc_release]) + return rpc_osystem + + +class LicenseKeyListingTest(MAASServerTestCase): + + def make_license_key_with_os(self, osystem=None, distro_series=None, + license_key=None): + license_key = factory.make_LicenseKey( + osystem=osystem, distro_series=distro_series, + license_key=license_key) + osystem = make_osystem_requiring_license_key( + license_key.osystem, license_key.distro_series) + return license_key, osystem + + def make_license_keys(self, count): + keys = [] + osystems = [] + for _ in range(count): + key, osystem = self.make_license_key_with_os() + keys.append(key) + osystems.append(osystem) + patch_usable_osystems(self, osystems=osystems) + self.patch( + settings_view, + 
'gen_all_known_operating_systems').return_value = osystems + return keys, osystems + + def test_settings_contains_osystem_and_distro_series(self): + self.client_log_in(as_admin=True) + keys, _ = self.make_license_keys(3) + response = self.client.get(reverse('settings')) + os_titles = [key.osystem for key in keys] + series_titles = [key.distro_series for key in keys] + self.assertThat( + response.content, ContainsAll(os_titles + series_titles)) + + def test_settings_link_to_add_license_key(self): + self.client_log_in(as_admin=True) + self.make_license_keys(3) + links = get_content_links(self.client.get(reverse('settings'))) + script_add_link = reverse('license-key-add') + self.assertIn(script_add_link, links) + + def test_settings_contains_links_to_delete(self): + self.client_log_in(as_admin=True) + keys, _ = self.make_license_keys(3) + links = get_content_links(self.client.get(reverse('settings'))) + license_key_delete_links = [ + reverse( + 'license-key-delete', args=[key.osystem, key.distro_series]) + for key in keys] + self.assertThat(links, ContainsAll(license_key_delete_links)) + + def test_settings_contains_links_to_edit(self): + self.client_log_in(as_admin=True) + keys, _ = self.make_license_keys(3) + links = get_content_links(self.client.get(reverse('settings'))) + license_key_delete_links = [ + reverse( + 'license-key-edit', args=[key.osystem, key.distro_series]) + for key in keys] + self.assertThat(links, ContainsAll(license_key_delete_links)) + + def test_settings_contains_commissioning_scripts_slot_anchor(self): + self.client_log_in(as_admin=True) + self.make_license_keys(3) + response = self.client.get(reverse('settings')) + document = fromstring(response.content) + slots = document.xpath( + "//div[@id='%s']" % LICENSE_KEY_ANCHOR) + self.assertEqual( + 1, len(slots), + "Missing anchor '%s'" % LICENSE_KEY_ANCHOR) + + +class LicenseKeyAddTest(MAASServerTestCase): + + def test_can_create_license_key(self): + self.client_log_in(as_admin=True) + osystem = 
make_osystem_requiring_license_key() + patch_usable_osystems(self, osystems=[osystem]) + self.patch(forms, 'validate_license_key').return_value = True + series = osystem['default_release'] + key = factory.make_name('key') + add_link = reverse('license-key-add') + definition = { + 'osystem': osystem['name'], + 'distro_series': series, + 'license_key': key, + } + response = self.client.post(add_link, definition) + self.assertEqual( + (httplib.FOUND, reverse('settings')), + (response.status_code, extract_redirect(response))) + new_license_key = LicenseKey.objects.get( + osystem=osystem['name'], distro_series=series) + self.assertAttributes(new_license_key, definition) + + +class LicenseKeyEditTest(MAASServerTestCase): + + def test_can_update_license_key(self): + self.client_log_in(as_admin=True) + key = factory.make_LicenseKey() + osystem = make_osystem_requiring_license_key( + key.osystem, key.distro_series) + patch_usable_osystems(self, osystems=[osystem]) + self.patch(forms, 'validate_license_key').return_value = True + new_key = factory.make_name('key') + edit_link = reverse( + 'license-key-edit', args=[key.osystem, key.distro_series]) + definition = { + 'osystem': key.osystem, + 'distro_series': key.distro_series, + 'license_key': new_key, + } + response = self.client.post(edit_link, definition) + self.assertEqual( + (httplib.FOUND, reverse('settings')), + (response.status_code, extract_redirect(response))) + self.assertAttributes(reload_object(key), definition) + + +class LicenseKeyDeleteTest(MAASServerTestCase): + + def test_can_delete_license_key(self): + self.client_log_in(as_admin=True) + key = factory.make_LicenseKey() + delete_link = reverse( + 'license-key-delete', args=[key.osystem, key.distro_series]) + response = self.client.post(delete_link, {'post': 'yes'}) + self.assertEqual( + (httplib.FOUND, reverse('settings')), + (response.status_code, extract_redirect(response))) + self.assertFalse( + LicenseKey.objects.filter( + osystem=key.osystem, 
distro_series=key.distro_series).exists()) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_settings.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_settings.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_settings.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_settings.py 2015-07-10 01:27:14.000000000 +0000 @@ -20,21 +20,27 @@ from django.contrib.auth.models import User from django.core.urlresolvers import reverse from lxml.html import fromstring -from maasserver.enum import ( - COMMISSIONING_DISTRO_SERIES_CHOICES, - DISTRO_SERIES, +from maasserver.clusterrpc.testing.osystems import ( + make_rpc_osystem, + make_rpc_release, ) from maasserver.models import ( + BootSource, Config, UserProfile, ) from maasserver.testing import ( extract_redirect, get_prefixed_form_data, - reload_object, ) from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.osystems import ( + make_usable_osystem, + patch_usable_osystems, + ) from maasserver.testing.testcase import MAASServerTestCase +from maasserver.views import settings as settings_view class SettingsTest(MAASServerTestCase): @@ -44,7 +50,7 @@ # delete or edit each user. Note that the link to delete the the # logged-in user is not display. self.client_log_in(as_admin=True) - [factory.make_user() for i in range(3)] + [factory.make_User() for _ in range(3)] users = UserProfile.objects.all_users() response = self.client.get(reverse('settings')) doc = fromstring(response.content) @@ -88,9 +94,9 @@ # Disable the DNS machinery so that we can skip the required # setup. 
self.patch(settings, "DNS_CONNECT", False) - new_name = factory.getRandomString() - new_domain = factory.getRandomString() - new_proxy = "http://%s.example.com:1234/" % factory.getRandomString() + new_name = factory.make_string() + new_domain = factory.make_string() + new_proxy = "http://%s.example.com:1234/" % factory.make_string() response = self.client.post( reverse('settings'), get_prefixed_form_data( @@ -111,9 +117,12 @@ def test_settings_commissioning_POST(self): self.client_log_in(as_admin=True) - new_check_compatibility = factory.getRandomBoolean() - new_commissioning_distro_series = factory.getRandomChoice( - COMMISSIONING_DISTRO_SERIES_CHOICES) + release = make_rpc_release(can_commission=True) + osystem = make_rpc_osystem('ubuntu', releases=[release]) + patch_usable_osystems(self, [osystem]) + + new_check_compatibility = factory.pick_bool() + new_commissioning = release['name'] response = self.client.post( reverse('settings'), get_prefixed_form_data( @@ -121,23 +130,44 @@ data={ 'check_compatibility': new_check_compatibility, 'commissioning_distro_series': ( - new_commissioning_distro_series), + new_commissioning), })) self.assertEqual(httplib.FOUND, response.status_code) self.assertEqual( ( new_check_compatibility, - new_commissioning_distro_series, + new_commissioning, ), ( Config.objects.get_config('check_compatibility'), Config.objects.get_config('commissioning_distro_series'), )) + def test_settings_hides_license_keys_if_no_OS_supporting_keys(self): + self.client_log_in(as_admin=True) + response = self.client.get(reverse('settings')) + doc = fromstring(response.content) + license_keys = doc.cssselect('#license_keys') + self.assertEqual( + 0, len(license_keys), "Didn't hide the license key section.") + + def test_settings_shows_license_keys_if_OS_supporting_keys(self): + self.client_log_in(as_admin=True) + release = make_rpc_release(requires_license_key=True) + osystem = make_rpc_osystem(releases=[release]) + self.patch( + settings_view, + 
'gen_all_known_operating_systems').return_value = [osystem] + response = self.client.get(reverse('settings')) + doc = fromstring(response.content) + license_keys = doc.cssselect('#license_keys') + self.assertEqual( + 1, len(license_keys), "Didn't show the license key section.") + def test_settings_third_party_drivers_POST(self): self.client_log_in(as_admin=True) - new_enable_third_party_drivers = factory.getRandomBoolean() + new_enable_third_party_drivers = factory.pick_bool() response = self.client.post( reverse('settings'), get_prefixed_form_data( @@ -156,11 +186,59 @@ Config.objects.get_config('enable_third_party_drivers'), )) + def test_settings_disk_erasing_on_release_POST(self): + self.client_log_in(as_admin=True) + new_enable_disk_erasing_on_release = factory.pick_bool() + response = self.client.post( + reverse('settings'), + get_prefixed_form_data( + prefix='disk_erasing_on_release', + data={ + 'enable_disk_erasing_on_release': ( + new_enable_disk_erasing_on_release), + })) + + self.assertEqual(httplib.FOUND, response.status_code) + self.assertEqual( + ( + new_enable_disk_erasing_on_release, + ), + ( + Config.objects.get_config('enable_disk_erasing_on_release'), + )) + + def test_settings_deploy_POST(self): + self.client_log_in(as_admin=True) + osystem = make_usable_osystem(self) + osystem_name = osystem['name'] + release_name = osystem['default_release'] + response = self.client.post( + reverse('settings'), + get_prefixed_form_data( + prefix='deploy', + data={ + 'default_osystem': osystem_name, + 'default_distro_series': '%s/%s' % ( + osystem_name, + release_name, + ), + })) + + self.assertEqual(httplib.FOUND, response.status_code, response.content) + self.assertEqual( + ( + osystem_name, + release_name, + ), + ( + Config.objects.get_config('default_osystem'), + Config.objects.get_config('default_distro_series'), + )) + def test_settings_ubuntu_POST(self): self.client_log_in(as_admin=True) new_main_archive = 'http://test.example.com/archive' 
new_ports_archive = 'http://test2.example.com/archive' - new_default_distro_series = factory.getRandomEnum(DISTRO_SERIES) response = self.client.post( reverse('settings'), get_prefixed_form_data( @@ -168,7 +246,6 @@ data={ 'main_archive': new_main_archive, 'ports_archive': new_ports_archive, - 'default_distro_series': new_default_distro_series, })) self.assertEqual(httplib.FOUND, response.status_code, response.content) @@ -176,12 +253,10 @@ ( new_main_archive, new_ports_archive, - new_default_distro_series, ), ( Config.objects.get_config('main_archive'), Config.objects.get_config('ports_archive'), - Config.objects.get_config('default_distro_series'), )) def test_settings_kernelopts_POST(self): @@ -200,6 +275,65 @@ new_kernel_opts, Config.objects.get_config('kernel_opts')) + def test_settings_boot_source_is_shown(self): + self.client_log_in(as_admin=True) + response = self.client.get(reverse('settings')) + doc = fromstring(response.content) + boot_source = doc.cssselect('#boot_source') + self.assertEqual( + 1, len(boot_source), "Didn't show boot image settings section.") + + def test_settings_boot_source_is_not_shown(self): + self.client_log_in(as_admin=True) + for _ in range(2): + factory.make_BootSource() + response = self.client.get(reverse('settings')) + doc = fromstring(response.content) + boot_source = doc.cssselect('#boot_source') + self.assertEqual( + 0, len(boot_source), "Didn't hide boot image settings section.") + + def test_settings_boot_source_POST_creates_new_source(self): + self.client_log_in(as_admin=True) + url = "http://test.example.com/archive" + keyring = "/usr/local/testing/path.gpg" + response = self.client.post( + reverse('settings'), + get_prefixed_form_data( + prefix='boot_source', + data={ + 'boot_source_url': url, + 'boot_source_keyring': keyring, + })) + + self.assertEqual(httplib.FOUND, response.status_code, response.content) + + boot_source = BootSource.objects.first() + self.assertIsNotNone(boot_source) + self.assertEqual( + (url, 
keyring), + (boot_source.url, boot_source.keyring_filename)) + + def test_settings_boot_source_POST_updates_source(self): + self.client_log_in(as_admin=True) + boot_source = factory.make_BootSource() + url = "http://test.example.com/archive" + keyring = "/usr/local/testing/path.gpg" + response = self.client.post( + reverse('settings'), + get_prefixed_form_data( + prefix='boot_source', + data={ + 'boot_source_url': url, + 'boot_source_keyring': keyring, + })) + + self.assertEqual(httplib.FOUND, response.status_code, response.content) + boot_source = reload_object(boot_source) + self.assertEqual( + (url, keyring), + (boot_source.url, boot_source.keyring_filename)) + class NonAdminSettingsTest(MAASServerTestCase): @@ -252,12 +386,12 @@ def test_add_user_POST(self): self.client_log_in(as_admin=True) params = { - 'username': factory.getRandomString(), - 'last_name': factory.getRandomString(30), - 'email': factory.getRandomEmail(), - 'is_superuser': factory.getRandomBoolean(), + 'username': factory.make_string(), + 'last_name': factory.make_string(30), + 'email': factory.make_email_address(), + 'is_superuser': factory.pick_bool(), } - password = factory.getRandomString() + password = factory.make_string() params.update(make_password_params(password)) response = self.client.post(reverse('accounts-add'), params) @@ -268,11 +402,11 @@ def test_edit_user_POST_profile_updates_attributes(self): self.client_log_in(as_admin=True) - user = factory.make_user() + user = factory.make_User() params = make_user_attribute_params(user) params.update({ 'last_name': factory.make_name('Newname'), - 'email': 'new-%s@example.com' % factory.getRandomString(), + 'email': 'new-%s@example.com' % factory.make_string(), 'is_superuser': True, 'username': factory.make_name('newname'), }) @@ -287,8 +421,8 @@ def test_edit_user_POST_updates_password(self): self.client_log_in(as_admin=True) - user = factory.make_user() - new_password = factory.getRandomString() + user = factory.make_User() + 
new_password = factory.make_string() params = make_password_params(new_password) response = self.client.post( reverse('accounts-edit', args=[user.username]), @@ -299,7 +433,7 @@ def test_delete_user_GET(self): # The user delete page displays a confirmation page with a form. self.client_log_in(as_admin=True) - user = factory.make_user() + user = factory.make_User() del_link = reverse('accounts-del', args=[user.username]) response = self.client.get(del_link) doc = fromstring(response.content) @@ -318,7 +452,7 @@ def test_delete_user_POST(self): # A POST request to the user delete finally deletes the user. self.client_log_in(as_admin=True) - user = factory.make_user() + user = factory.make_User() user_id = user.id del_link = reverse('accounts-del', args=[user.username]) response = self.client.post(del_link, {'post': 'yes'}) @@ -328,7 +462,7 @@ def test_view_user(self): # The user page feature the basic information about the user. self.client_log_in(as_admin=True) - user = factory.make_user() + user = factory.make_User() del_link = reverse('accounts-view', args=[user.username]) response = self.client.get(del_link) doc = fromstring(response.content) @@ -339,7 +473,7 @@ def test_account_views_are_routable_for_full_range_of_usernames(self): # Usernames can include characters in the regex [\w.@+-]. self.client_log_in(as_admin=True) - user = factory.make_user(username="abc-123@example.com") + user = factory.make_User(username="abc-123@example.com") for view in "edit", "view", "del": path = reverse("accounts-%s" % view, args=[user.username]) self.assertIsInstance(path, (bytes, unicode)) diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_snippets.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_snippets.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_snippets.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_snippets.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,47 @@ +# Copyright 2015 Canonical Ltd. 
This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Test maasserver snippets.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + + +from django.core.urlresolvers import reverse +from lxml.html import fromstring +from maasserver.testing.testcase import MAASServerTestCase +from testtools.matchers import ( + HasLength, + Not, + ) + + +class SnippetsTest(MAASServerTestCase): + + def test_index_page_containts_add_node_snippet(self): + self.client_log_in() + index_page = self.client.get(reverse('index')) + doc = fromstring(index_page.content) + self.assertEqual( + 'text/x-template', doc.cssselect('#add-node')[0].attrib['type']) + + def test_add_node_snippet_hides_osystem_distro_series_labels(self): + self.client_log_in() + index_page = self.client.get(reverse('index')) + doc = fromstring(index_page.content) + content_text = doc.cssselect('#add-node')[0].text_content() + add_node_snippet = fromstring(content_text) + self.expectThat( + add_node_snippet.cssselect("label[for=id_osystem].hidden"), + Not(HasLength(0)), "No hidden id_osystem label") + self.expectThat( + add_node_snippet.cssselect("label[for=id_distro_series].hidden"), + Not(HasLength(0)), "No hidden id_distro_series label") diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_tags.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_tags.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_tags.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_tags.py 2015-07-10 01:27:14.000000000 +0000 @@ -30,7 +30,7 @@ def test_view_tag_displays_tag_info(self): # The tag page features the basic information about the tag. 
self.client_log_in() - tag = factory.make_tag( + tag = factory.make_Tag( name='the-named-tag', comment='Human description of the tag', definition='//xpath') @@ -43,10 +43,10 @@ def test_view_tag_includes_node_links(self): self.client_log_in() - tag = factory.make_tag() - node = factory.make_node() + tag = factory.make_Tag() + node = factory.make_Node() node.tags.add(tag) - mac = factory.make_mac_address(node=node).mac_address + mac = factory.make_MACAddress(node=node).mac_address tag_link = reverse('tag-view', args=[tag.name]) node_link = reverse('node-view', args=[node.system_id]) response = self.client.get(tag_link) @@ -58,12 +58,17 @@ self.assertIn(node_link, get_content_links(response)) def test_view_tag_num_queries_is_independent_of_num_nodes(self): + # XXX: GavinPanella 2014-10-03 bug=1377335 + self.skip("Unreliable; something is causing varying counts.") + self.client_log_in() - tag = factory.make_tag() + tag = factory.make_Tag() tag_link = reverse('tag-view', args=[tag.name]) - nodegroup = factory.make_node_group() - nodes = [factory.make_node(nodegroup=nodegroup, mac=True) - for i in range(20)] + nodegroup = factory.make_NodeGroup() + nodes = [ + factory.make_Node(nodegroup=nodegroup, mac=True) + for _ in range(20) + ] for node in nodes[:10]: node.tags.add(tag) num_queries, response = count_queries(self.client.get, tag_link) @@ -85,9 +90,9 @@ def test_view_tag_hides_private_nodes(self): self.client_log_in() - tag = factory.make_tag() - node = factory.make_node() - node2 = factory.make_node(owner=factory.make_user()) + tag = factory.make_Tag() + node = factory.make_Node() + node2 = factory.make_Node(owner=factory.make_User()) node.tags.add(tag) node2.tags.add(tag) tag_link = reverse('tag-view', args=[tag.name]) @@ -99,8 +104,8 @@ def test_view_tag_shows_kernel_params(self): self.client_log_in() - tag = factory.make_tag(kernel_opts='--test tag params') - node = factory.make_node() + tag = factory.make_Tag(kernel_opts='--test tag params') + node = 
factory.make_Node() node.tags = [tag] tag_link = reverse('tag-view', args=[tag.name]) response = self.client.get(tag_link) @@ -118,9 +123,9 @@ # Set a very small page size to save creating lots of nodes page_size = 2 self.patch(tags_views.TagView, 'paginate_by', page_size) - tag = factory.make_tag() + tag = factory.make_Tag() nodes = [ - factory.make_node(created="2012-10-12 12:00:%02d" % i) + factory.make_Node(created="2012-10-12 12:00:%02d" % i) for i in range(page_size * 2 + 1) ] for node in nodes: diff -Nru maas-1.5.4+bzr2294/src/maasserver/views/tests/test_zones.py maas-1.7.6+bzr3376/src/maasserver/views/tests/test_zones.py --- maas-1.5.4+bzr2294/src/maasserver/views/tests/test_zones.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/views/tests/test_zones.py 2015-07-10 01:27:14.000000000 +0000 @@ -26,9 +26,9 @@ from maasserver.testing import ( extract_redirect, get_content_links, - reload_object, ) from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maasserver.views.zones import ( ZoneAdd, @@ -56,7 +56,7 @@ def test_zone_list_displays_zone_details(self): # Zone listing displays the zone name and the zone description. self.client_log_in() - [factory.make_zone() for i in range(3)] + [factory.make_Zone() for _ in range(3)] zones = Zone.objects.all() response = self.client.get(reverse('zone-list')) zone_names = [zone.name for zone in zones] @@ -69,7 +69,7 @@ def test_zone_list_displays_sorted_list_of_zones(self): # Zones are alphabetically sorted on the zone list page. 
self.client_log_in() - [factory.make_zone(sortable_name=True) for i in range(3)] + [factory.make_Zone(sortable_name=True) for _ in range(3)] zones = Zone.objects.all() sorted_zones = sorted(zones, key=lambda x: x.name.lower()) response = self.client.get(reverse('zone-list')) @@ -83,7 +83,7 @@ def test_zone_list_displays_links_to_zone_node(self): self.client_log_in() - [factory.make_zone(sortable_name=True) for i in range(3)] + [factory.make_Zone(sortable_name=True) for _ in range(3)] zones = Zone.objects.all() sorted_zones = sorted(zones, key=lambda x: x.name.lower()) response = self.client.get(reverse('zone-list')) @@ -101,7 +101,7 @@ def test_zone_list_does_not_contain_edit_and_delete_links(self): self.client_log_in() - zones = [factory.make_zone() for i in range(3)] + zones = [factory.make_Zone() for _ in range(3)] response = self.client.get(reverse('zone-list')) zone_edit_links = [ reverse('zone-edit', args=[zone.name]) for zone in zones] @@ -125,7 +125,7 @@ self.patch(ZoneListView, "paginate_by", 3) self.client_log_in(as_admin=True) # Create 4 zones. 
- [factory.make_zone() for _ in range(4)] + [factory.make_Zone() for _ in range(4)] response = self.client.get(reverse('zone-list')) self.assertEqual(httplib.OK, response.status_code) doc = fromstring(response.content) @@ -138,7 +138,7 @@ def test_zone_list_contains_edit_links(self): self.client_log_in(as_admin=True) - zones = [factory.make_zone() for i in range(3)] + zones = [factory.make_Zone() for _ in range(3)] default_zone = Zone.objects.get_default_zone() zone_edit_links = [ reverse('zone-edit', args=[zone.name]) for zone in zones] @@ -181,7 +181,7 @@ self.client_log_in(as_admin=True) definition = { 'name': factory.make_name('zone'), - 'description': factory.getRandomString(), + 'description': factory.make_string(), } response = self.client.post(reverse('zone-add'), definition) self.assertEqual(httplib.FOUND, response.status_code) @@ -209,7 +209,7 @@ # The Zone detail view displays the zone name and the zone # description. self.client_log_in() - zone = factory.make_zone() + zone = factory.make_Zone() response = self.client.get(reverse('zone-view', args=[zone.name])) self.assertThat(response.content, Contains(zone.name)) self.assertThat( @@ -217,8 +217,8 @@ def test_zone_detail_displays_node_count(self): self.client_log_in() - zone = factory.make_zone() - node = factory.make_node() + zone = factory.make_Zone() + node = factory.make_Node() node.zone = zone response = self.client.get(reverse('zone-view', args=[zone.name])) document = fromstring(response.content) @@ -228,8 +228,8 @@ def test_zone_detail_links_to_node_list(self): self.client_log_in() - zone = factory.make_zone() - node = factory.make_node() + zone = factory.make_Zone() + node = factory.make_Node() node.zone = zone response = self.client.get(reverse('zone-view', args=[zone.name])) zone_node_link = ( @@ -243,14 +243,14 @@ def test_zone_detail_does_not_contain_edit_link(self): self.client_log_in() - zone = factory.make_zone() + zone = factory.make_Zone() response = 
self.client.get(reverse('zone-view', args=[zone.name])) zone_edit_link = reverse('zone-edit', args=[zone.name]) self.assertNotIn(zone_edit_link, get_content_links(response)) def test_zone_detail_does_not_contain_delete_link(self): self.client_log_in() - zone = factory.make_zone() + zone = factory.make_Zone() response = self.client.get(reverse('zone-view', args=[zone.name])) zone_delete_link = reverse('zone-del', args=[zone.name]) self.assertNotIn(zone_delete_link, get_content_links(response)) @@ -260,14 +260,14 @@ def test_zone_detail_contains_edit_link(self): self.client_log_in(as_admin=True) - zone = factory.make_zone() + zone = factory.make_Zone() response = self.client.get(reverse('zone-view', args=[zone.name])) zone_edit_link = reverse('zone-edit', args=[zone.name]) self.assertIn(zone_edit_link, get_content_links(response)) def test_zone_detail_contains_delete_link(self): self.client_log_in(as_admin=True) - zone = factory.make_zone() + zone = factory.make_Zone() response = self.client.get(reverse('zone-view', args=[zone.name])) zone_delete_link = reverse('zone-del', args=[zone.name]) self.assertIn(zone_delete_link, get_content_links(response)) @@ -284,7 +284,7 @@ def test_cannot_access_zone_edit(self): self.client_log_in() - zone = factory.make_zone() + zone = factory.make_Zone() response = self.client.post(reverse('zone-edit', args=[zone.name])) self.assertEqual(reverse('login'), extract_redirect(response)) @@ -293,7 +293,7 @@ def test_zone_edit(self): self.client_log_in(as_admin=True) - zone = factory.make_zone() + zone = factory.make_Zone() new_name = factory.make_name('name') new_description = factory.make_name('description') response = self.client.post( @@ -316,7 +316,7 @@ def test_cannot_delete(self): self.client_log_in() - zone = factory.make_zone() + zone = factory.make_Zone() response = self.client.post(reverse('zone-del', args=[zone.name])) self.assertEqual(reverse('login'), extract_redirect(response)) self.assertIsNotNone(reload_object(zone)) @@ 
-326,7 +326,7 @@ def test_deletes_zone(self): self.client_log_in(as_admin=True) - zone = factory.make_zone() + zone = factory.make_Zone() response = self.client.post( reverse('zone-del', args=[zone.name]), {'post': 'yes'}) @@ -354,7 +354,7 @@ def test_redirects_to_listing(self): self.client_log_in(as_admin=True) - zone = factory.make_zone() + zone = factory.make_Zone() response = self.client.post( reverse('zone-del', args=[zone.name]), {'post': 'yes'}) @@ -362,8 +362,8 @@ def test_does_not_delete_nodes(self): self.client_log_in(as_admin=True) - zone = factory.make_zone() - node = factory.make_node(zone=zone) + zone = factory.make_Zone() + node = factory.make_Node(zone=zone) response = self.client.post( reverse('zone-del', args=[zone.name]), {'post': 'yes'}) diff -Nru maas-1.5.4+bzr2294/src/maasserver/worker_user.py maas-1.7.6+bzr3376/src/maasserver/worker_user.py --- maas-1.5.4+bzr2294/src/maasserver/worker_user.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/worker_user.py 2015-07-10 01:27:14.000000000 +0000 @@ -3,7 +3,7 @@ """System user representing node-group workers. -The Celery workers access the MAAS API under this user identity. +Workers access the MAAS API under this user identity. """ from __future__ import ( @@ -21,24 +21,16 @@ ] from django.contrib.auth.models import User -from django.core.cache import cache user_name = 'maas-nodegroup-worker' -# Cache key for the worker user. 
-WORKER_USER_CACHE_KEY = 'worker-user-maas-cache-key' - def get_worker_user(): """Get the system user representing the node-group workers.""" - worker_user = cache.get(WORKER_USER_CACHE_KEY) - if worker_user is None: - worker_user, created = User.objects.get_or_create( - username=user_name, defaults=dict( - first_name="Node-group worker", - last_name="Special user", - email="maas-nodegroup-worker@localhost", - is_staff=False, is_superuser=False)) - cache.set(WORKER_USER_CACHE_KEY, worker_user) + worker_user, created = User.objects.get_or_create( + username=user_name, defaults=dict( + first_name="Node-group worker", last_name="Special user", + email="maas-nodegroup-worker@localhost", is_staff=False, + is_superuser=False)) return worker_user diff -Nru maas-1.5.4+bzr2294/src/maasserver/x509.py maas-1.7.6+bzr3376/src/maasserver/x509.py --- maas-1.5.4+bzr2294/src/maasserver/x509.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maasserver/x509.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,176 @@ +# Copyright 2014 Cloudbase Solutions SRL. +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +from __future__ import ( + absolute_import, + print_function, + unicode_literals, +) + +str = None + +__metaclass__ = type +__all__ = [] + +import getpass +import logging +import os +import random +import socket +from string import ( + ascii_lowercase, + ascii_uppercase, + digits, + ) + +import OpenSSL +from provisioningserver.utils.fs import ( + atomic_write, + ensure_dir, + read_text_file, + ) + + +logger = logging.getLogger(__name__) + + +class WinRMX509Error(Exception): + """Error when generating x509 certificate.""" + + +class WinRMX509: + """Generates X509 certificates compatible with Windows WinRM.""" + + KEY_SIZE = 2048 + PASSPHRASE_LENGTH = 21 + + def __init__(self, cert_name, upn_name=None, cert_dir=None): + self.store = self.get_ssl_dir(cert_dir) + self.cert_name = cert_name + self.upn_name = upn_name + if self.upn_name is None: + user = getpass.getuser() + host = socket.getfqdn() + self.upn_name = "%s@%s" % (user, host) + + self.pem_file = os.path.join(self.store, "%s.pem" % self.cert_name) + self.key_file = os.path.join(self.store, "%s.key" % self.cert_name) + self.pfx_file = os.path.join(self.store, "%s.pfx" % self.cert_name) + + def create_cert(self, print_cert=False): + """Generate a new certifficate, and save it to disk.""" + if os.path.isfile(self.pem_file): + raise WinRMX509Error( + "Certificate %s already exists." 
% self.pem_file) + + key, cert = self.get_key_and_cert() + self.write_cert(cert) + self.write_privatekey(key) + + if print_cert: + self.print_cert_details(self.pem_file) + + logger.debug("Exporting to PKCS12") + passwd = self.generate_passphrase() + try: + self.export_p12(key, cert, passwd) + logger.debug("Passphrase for exported p12: %s" % passwd) + except OpenSSL.crypto.Error as err: + raise WinRMX509Error("Failed to export p12: %s" % err) + + def get_key_and_cert(self): + """Return the private key and certificate for x509.""" + key = OpenSSL.crypto.PKey() + key.generate_key(OpenSSL.crypto.TYPE_RSA, self.KEY_SIZE) + cert = OpenSSL.crypto.X509() + cert.get_subject().CN = self.upn_name + subjectAltName = OpenSSL.crypto.X509Extension( + "subjectAltName", + True, + "otherName:1.3.6.1.4.1.311.20.2.3;UTF8:%s" % self.upn_name) + key_usage = OpenSSL.crypto.X509Extension( + "extendedKeyUsage", True, "clientAuth") + cert.set_serial_number(1000) + cert.gmtime_adj_notBefore(0) + cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60) + cert.add_extensions([subjectAltName, key_usage, ]) + cert.set_pubkey(key) + cert.set_issuer(cert.get_subject()) + cert.sign(key, 'sha1') + return key, cert + + def get_cert_details(self, pem_file): + """Return a dictionary containing X509 subject, thumbprint and + contents.""" + cert, contents = self.load_pem_file(pem_file) + subject = cert.get_subject().CN + thumb = cert.digest('SHA1') + return {'subject': subject, 'thumbprint': thumb, 'contents': contents} + + def write_privatekey(self, key): + """Write the private key to disk.""" + logger.debug("Writing key: %s" % self.key_file) + atomic_write( + OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key), + self.key_file) + + def write_cert(self, cert): + """Write the certificate to disk.""" + logger.debug("Writing certificate: %s" % self.pem_file) + atomic_write( + OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert), + self.pem_file) + + def print_cert_details(self, 
pem_file): + """Print x509 details to stdout.""" + details = self.get_cert_details(pem_file) + print("Certificate Subject: %s" % details['subject']) + print("Certificate Thumbprint: %s" % details['thumbprint']) + print("You may add the following cert in MAAS:") + print(details['contents']) + + def load_pem_file(self, pem_file): + """Load a PEM file. Returning `OpenSSL.crypto.X509` object and the + contents of the file. + + :param pem_file: file to load + """ + pem_data = read_text_file(pem_file) + try: + cert = OpenSSL.crypto.load_certificate( + OpenSSL.crypto.FILETYPE_PEM, pem_data) + except OpenSSL.crypto.Error as err: + raise WinRMX509Error("Failed to load certificate: %s" % err) + return cert, pem_data + + def export_p12(self, key, cert, passphrase): + """Create a pcks12 password protected container for the generated + certificates. + + :param key: Key file to add to PFX file + :param cert: Certificate file to add to PFX file + :param passphrase: export passphrase for PFX file + """ + p12 = OpenSSL.crypto.PKCS12() + p12.set_certificate(cert) + p12.set_privatekey(key) + atomic_write(p12.export(passphrase=passphrase), self.pfx_file) + + def get_ssl_dir(self, cert_dir=None): + """Return the directory in which to save the certificates. This also + ensures that the directory exists. + """ + if cert_dir is None: + home_dir = os.path.expanduser("~") + cert_dir = os.path.join(home_dir, '.ssl') + ensure_dir(cert_dir) + return cert_dir + + def generate_passphrase(self): + """Generate an alphanumeric random string to be used together with + `export_p12`. 
+ """ + choices = ascii_uppercase + ascii_lowercase + digits + return ''.join( + random.choice(choices) for _ in range(self.PASSPHRASE_LENGTH)) diff -Nru maas-1.5.4+bzr2294/src/maastesting/celery.py maas-1.7.6+bzr3376/src/maastesting/celery.py --- maas-1.5.4+bzr2294/src/maastesting/celery.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maastesting/celery.py 2015-07-10 01:27:14.000000000 +0000 @@ -25,7 +25,7 @@ class CeleryFixture(Fixture): - """This fixture will make Celery run tasks in a synchronous fashion. + """This fixture will record Celery tasks as they're run. This fixture can be used directly:: diff -Nru maas-1.5.4+bzr2294/src/maastesting/crochet.py maas-1.7.6+bzr3376/src/maastesting/crochet.py --- maas-1.5.4+bzr2294/src/maastesting/crochet.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maastesting/crochet.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,102 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Support for testing with `crochet`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "EventualResultCatchingMixin", + ] + +import crochet +from testtools.content import ( + Content, + UTF8_TEXT, + ) +from testtools.matchers import Equals + + +class EventualResultCatchingMixin: + """A mix-in for tests that checks for unfired/unhandled `EventualResults`. + + It reports about all py:class:`crochet.EventualResults` that are unfired + or whose results have not been retrieved. A test detail is recorded for + each, then the test is force-failed at the last moment. + """ + + def setUp(self): + super(EventualResultCatchingMixin, self).setUp() + # Every EventualResult that crochet creates is registered into this + # registry. We'll check it after the test has finished. 
+ registry = crochet._main._registry + # The registry stores EventualResults in a WeakSet, which means that + # unfired and unhandled results can be garbage collected before we get + # to see them. Here we patch in a regular set so that nothing gets + # garbage collected until we've been able to check the results. + self.addCleanup(setattr, registry, "_results", registry._results) + registry._results = set() + # While unravelling clean-ups is a good time to check the results. Any + # meaningful work represented by an EventualResult should have been + # done by now. + self.addCleanup(self.__checkResults, registry._results) + + def __checkResults(self, eventual_results): + fail_count = 0 + + # Go through all the EventualResults created in this test. + for eventual_result in eventual_results: + # If the result has been retrieved, fine, otherwise look closer. + if not eventual_result._result_retrieved: + fail_count += 1 + + try: + # Is there a result waiting to be retrieved? + result = eventual_result.wait(timeout=0) + except crochet.TimeoutError: + # No result yet. This could be because the result is wired + # up to a Deferred that hasn't fired yet, or because it + # hasn't yet been connected. + if eventual_result._deferred is None: + message = [ + "*** EventualResult has not fired:\n", + "%r\n" % (eventual_result,), + "*** It was not connected to a Deferred.\n", + ] + else: + message = [ + "*** EventualResult has not fired:\n", + "%r\n" % (eventual_result,), + "*** It was connected to a Deferred:\n", + "%r\n" % (eventual_result._deferred,), + ] + else: + # A result, but nothing has collected it. This can be + # caused by forgetting to call wait(). + message = [ + "*** EventualResult has fired:\n", + "%r\n" % (eventual_result,), + "*** It contained the following result:\n", + "%r\n" % (result,), + "*** but it was not collected.\n", + "*** Was result.wait() called?\n", + ] + + # Record the details with a unique name. 
+ message = [block.encode("utf-8") for block in message] + self.addDetail( + "Unfired/unhandled EventualResult #%d" % fail_count, + Content(UTF8_TEXT, lambda: message)) + + # Use expectThat() so that other clean-up tasks run to completion + # before, at the last moment, the test is failed. + self.expectThat( + fail_count, Equals(0), "Unfired and/or unhandled " + "EventualResult(s); see test details.") diff -Nru maas-1.5.4+bzr2294/src/maastesting/djangotestcase.py maas-1.7.6+bzr3376/src/maastesting/djangotestcase.py --- maas-1.5.4+bzr2294/src/maastesting/djangotestcase.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maastesting/djangotestcase.py 2015-07-10 01:27:14.000000000 +0000 @@ -21,8 +21,6 @@ from django.conf import settings -from django.core.cache import cache as django_cache -from django.core.management import call_command from django.core.management.commands import syncdb from django.core.signals import request_started from django.db import ( @@ -83,34 +81,14 @@ """ -def cleanup_db(testcase): - # Force a flush of the db: this is called by test classes based on - # django.test.TransactionTestCase at the beginning of each - # TransactionTestCase test but not at the end. The Django test runner - # avoids any problem by running all the TestCase tests and *then* - # all the TransactionTestCase tests. Since we use nose, we don't - # have that ordering and thus we need to manually flush the db after - # each TransactionTestCase test. Le Sigh. - if getattr(testcase, 'multi_db', False): - databases = connections - else: - databases = [DEFAULT_DB_ALIAS] - for db in databases: - call_command('flush', verbosity=0, interactive=False, database=db) - - class TransactionTestCase(MAASTestCase, django.test.TransactionTestCase): - """`TransactionTestCase` for Metal as a Service. + """`TransactionTestCase` for MAAS. A version of MAASTestCase that supports transactions. 
The basic Django TestCase class uses transactions to speed up tests - so this class should be used when tests involve transactions. + so this class should only be used when tests involve transactions. """ - def _fixture_teardown(self): - cleanup_db(self) - django_cache.clear() - super(TransactionTestCase, self)._fixture_teardown() class TestModelMixin: diff -Nru maas-1.5.4+bzr2294/src/maastesting/factory.py maas-1.7.6+bzr3376/src/maastesting/factory.py --- maas-1.5.4+bzr2294/src/maastesting/factory.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maastesting/factory.py 2015-07-10 01:27:14.000000000 +0000 @@ -21,7 +21,9 @@ import datetime from functools import partial import httplib +import io from itertools import ( + count, imap, islice, repeat, @@ -32,6 +34,8 @@ import string import subprocess import time +import urllib2 +import urlparse from uuid import uuid1 from maastesting.fixtures import TempDirectory @@ -56,6 +60,20 @@ """ +def network_clashes(network, other_networks): + """Does the IP range for `network` clash with any in `other_networks`? + + :param network: An `IPNetwork`. + :param other_networks: An iterable of `IPNetwork` items. + :return: Whether the IP range for `network` overlaps with any of those + for the networks in `other_networks`. 
+ """ + for other_network in other_networks: + if network in other_network or other_network in network: + return True + return False + + class Factory: random_letters = imap( @@ -75,39 +93,94 @@ random_octets = iter(random_octet, None) - def getRandomString(self, size=10, spaces=False): + def make_string(self, size=10, spaces=False, prefix=""): if spaces: - return "".join(islice(self.random_letters_with_spaces, size)) + return prefix + "".join( + islice(self.random_letters_with_spaces, size)) else: - return "".join(islice(self.random_letters, size)) + return prefix + "".join(islice(self.random_letters, size)) - def getRandomBytes(self, size=10): + def make_bytes(self, size=10): + """Return a `bytes` filled with random data.""" return os.urandom(size) - def getRandomUsername(self, size=10): + def make_username(self, size=10): + """Create an arbitrary user name (but not the actual user).""" return "".join(islice(self.random_letters_for_usernames, size)) - def getRandomEmail(self, login_size=10): - return "%s@example.com" % self.getRandomString(size=login_size) + def make_email_address(self, login_size=10): + """Generate an arbitrary email address.""" + return "%s@example.com" % self.make_string(size=login_size) - def getRandomStatusCode(self): + def make_status_code(self): + """Return an arbitrary HTTP status code.""" return next(self.random_http_responses) - def getRandomBoolean(self): + exception_type_names = (b"TestException#%d" % i for i in count(1)) + + def make_exception_type(self, bases=(Exception,), **namespace): + return type(next(self.exception_type_names), bases, namespace) + + def make_exception(self, message=None, bases=(Exception,), **namespace): + exc_type = self.make_exception_type(bases, **namespace) + return exc_type() if message is None else exc_type(message) + + def pick_bool(self): + """Return an arbitrary Boolean value (`True` or `False`).""" return random.choice((True, False)) - def getRandomPort(self, port_min=1024, port_max=65535): + def 
pick_port(self, port_min=1024, port_max=65535): assert port_min >= 0 and port_max <= 65535 return random.randint(port_min, port_max) - def getRandomIPAddress(self): + def make_vlan_tag(self, allow_none=False, but_not=None): + """Create a random VLAN tag. + + :param allow_none: Whether `None` ("no VLAN") can be allowed as an + outcome. If `True`, `None` will be included in the possible + results with a deliberately over-represented probability, in order + to help trip up bugs that might only show up once in about 4094 + calls otherwise. + :param but_not: A list of tags that should not be returned. Any zero + or `None` entries will be ignored. + """ + if but_not is None: + but_not = [] + if allow_none and self.pick_bool(): + return None + else: + for _ in range(100): + vlan_tag = random.randint(1, 0xffe) + if vlan_tag not in but_not: + return vlan_tag + raise TooManyRandomRetries("Could not find an available VLAN tag.") + + def make_ipv4_address(self): octets = islice(self.random_octets, 4) return '%d.%d.%d.%d' % tuple(octets) - def getRandomUUID(self): + def make_ipv6_address(self): + # We return from the fc00::/7 space because that's a private + # space and shouldn't cause problems of addressing the outside + # world. + network = IPNetwork('fc00::/7') + # We can't use random.choice() because there are too many + # elements in network. + random_address_index = random.randint(0, network.size - 1) + return unicode(IPAddress(network[random_address_index])) + + def make_ip_address(self): + if random.randint(0, 1): + return self.make_ipv6_address() + else: + return self.make_ipv4_address() + + def make_UUID(self): return unicode(uuid1()) - def getRandomNetwork(self, slash=None, but_not=None): + def _make_random_network( + self, slash=None, but_not=None, disjoint_from=None, + random_address_factory=None): """Generate a random IP network. :param slash: Netmask or bit width of the network, e.g. 24 or @@ -116,24 +189,77 @@ should not be returned. 
Use this when you need a different network from any returned previously. The new network may overlap any of these, but it won't be identical. + :param disjoint_from: Optional iterable of `IPNetwork` objects whose + IP ranges the new network must not overlap. + :param random_address_factory: A callable that returns a random IP + address. If not provided, will default to + Factory.make_ipv4_address(). :return: A network spanning at least 8 IP addresses (at most 29 bits). :rtype: :class:`IPNetwork` """ if but_not is None: but_not = [] + but_not = frozenset(but_not) + if disjoint_from is None: + disjoint_from = [] if slash is None: slash = random.randint(16, 29) - but_not = set(but_not) + if random_address_factory is None: + random_address_factory = self.make_ipv4_address + # Look randomly for a network that matches our criteria. for _ in range(100): - network = IPNetwork('%s/%s' % (self.getRandomIPAddress(), slash)) - if network not in but_not: + network = IPNetwork('%s/%s' % (random_address_factory(), slash)) + forbidden = (network in but_not) + clashes = network_clashes(network, disjoint_from) + if not forbidden and not clashes: return network raise TooManyRandomRetries("Could not find available network") - def getRandomIPInNetwork(self, network, but_not=None): + def make_ipv4_network(self, slash=None, but_not=None, disjoint_from=None): + """Generate a random IPv4 network. + + :param slash: Netmask or bit width of the network, e.g. 24 or + '255.255.255.0' for what used to be known as a class-C network. + :param but_not: Optional iterable of `IPNetwork` objects whose values + should not be returned. Use this when you need a different network + from any returned previously. The new network may overlap any of + these, but it won't be identical. + :param disjoint_from: Optional iterable of `IPNetwork` objects whose + IP ranges the new network must not overlap. + :return: A network spanning at least 8 IP addresses (at most 29 bits). 
+ :rtype: :class:`IPNetwork` + """ + if slash is None: + slash = random.randint(16, 29) + return self._make_random_network( + slash=slash, but_not=but_not, disjoint_from=disjoint_from, + random_address_factory=self.make_ipv4_address) + + def make_ipv6_network(self, slash=None, but_not=None, disjoint_from=None): + """Generate a random IPv6 network. + + :param slash: Netmask or bit width of the network. If not + specified, will default to a bit width of between 112 (65536 + addresses) and 125 (8 addresses); + :param but_not: Optional iterable of `IPNetwork` objects whose values + should not be returned. Use this when you need a different network + from any returned previously. The new network may overlap any of + these, but it won't be identical. + :param disjoint_from: Optional iterable of `IPNetwork` objects whose + IP ranges the new network must not overlap. + :return: A network spanning at least 8 IP addresses. + :rtype: :class:`IPNetwork` + """ + if slash is None: + slash = random.randint(112, 125) + return self._make_random_network( + slash=slash, but_not=but_not, disjoint_from=disjoint_from, + random_address_factory=self.make_ipv6_address) + + def pick_ip_in_network(self, network, but_not=None): if but_not is None: but_not = [] - but_not = [IPAddress(but) for but in but_not] + but_not = [IPAddress(but) for but in but_not if but is not None] address = IPAddress(random.randint(network.first, network.last)) for _ in range(100): address = IPAddress(random.randint(network.first, network.last)) @@ -141,28 +267,41 @@ return bytes(address) raise TooManyRandomRetries("Could not find available IP in network") - def make_ip_range(self, network=None, but_not=None): - """Return a pair of IP addresses, with the first lower than the second. + def make_ipv4_range(self, network=None, but_not=None): + """Return a pair of IPv4 addresses. :param network: Return IP addresses within this network. :param but_not: A pair of addresses that should not be returned. 
:return: A pair of `IPAddress`. """ if network is None: - network = self.getRandomNetwork() + network = self.make_ipv4_network() if but_not is not None: low, high = but_not but_not = (IPAddress(low), IPAddress(high)) for _ in range(100): ip_range = tuple(sorted( - IPAddress(factory.getRandomIPInNetwork(network)) + IPAddress(factory.pick_ip_in_network(network)) for _ in range(2) )) if ip_range[0] < ip_range[1] and ip_range != but_not: return ip_range raise TooManyRandomRetries("Could not find available IP range") - def getRandomMACAddress(self, delimiter=":"): + make_ip_range = make_ipv4_range # DEPRECATED. + + def make_ipv6_range(self, network=None, but_not=None): + """Return a pair of IPv6 addresses. + + :param network: Return IP addresses within this network. + :param but_not: A pair of addresses that should not be returned. + :return: A pair of `IPAddress`. + """ + if network is None: + network = self.make_ipv6_network() + return self.make_ip_range(network=network, but_not=but_not) + + def make_mac_address(self, delimiter=":"): assert isinstance(delimiter, unicode) octets = islice(self.random_octets, 6) return delimiter.join(format(octet, "02x") for octet in octets) @@ -173,10 +312,10 @@ # guards against shortfalls as random IP addresses collide. leases = {} while len(leases) < num_leases: - leases[self.getRandomIPAddress()] = self.getRandomMACAddress() + leases[self.make_ipv4_address()] = self.make_mac_address() return leases - def getRandomDate(self, year=2011): + def make_date(self, year=2011): start = time.mktime(datetime.datetime(year, 1, 1).timetuple()) end = time.mktime(datetime.datetime(year + 1, 1, 1).timetuple()) stamp = random.randrange(start, end) @@ -205,9 +344,9 @@ :return: Path to the file. 
""" if name is None: - name = self.getRandomString() + name = self.make_string() if contents is None: - contents = self.getRandomString().encode('ascii') + contents = self.make_string().encode('ascii') path = os.path.join(location, name) with open(path, 'w') as f: f.write(contents) @@ -218,7 +357,7 @@ :param prefix: Optional prefix. Pass one to help make test failures and tracebacks easier to read! If you don't, you might as well - use `getRandomString`. + use `make_string`. :param sep: Separator that will go between the prefix and the random portion of the name. Defaults to a dash. :param size: Length of the random portion of the name. Don't get @@ -228,7 +367,7 @@ :return: A randomized unicode string. """ return sep.join( - filter(None, [prefix, self.getRandomString(size=size)])) + filter(None, [prefix, self.make_string(size=size)])) def make_hostname(self, prefix='host', *args, **kwargs): """Generate a random hostname. @@ -237,6 +376,67 @@ implicitely lowercases the hostnames.""" return self.make_name(prefix=prefix, *args, **kwargs).lower() + # Always select from a scheme that allows parameters in the URL so + # that we can round-trip a URL with params successfully (otherwise + # the params don't get parsed out of the path). + _make_parsed_url_schemes = tuple( + scheme for scheme in urlparse.uses_params + if scheme != "") + + def make_parsed_url( + self, scheme=None, netloc=None, path=None, params=None, + query=None, fragment=None): + """Generate a random parsed URL object. + + Contains randomly generated values for all parts of a URL: scheme, + location, path, parameters, query, and fragment. However, each part + can be overridden individually. + + :return: Instance of :py:class:`urlparse.ParseResult`. + """ + if scheme is None: + # Select a scheme that allows parameters; see above. 
+ scheme = random.choice(self._make_parsed_url_schemes) + if netloc is None: + netloc = "%s.example.com" % self.make_name("netloc").lower() + if path is None: + # A leading forward-slash will be added in geturl() if we + # don't, so ensure it's here now so tests can compare URLs + # without worrying about it. + path = self.make_name("/path") + else: + # Same here with the forward-slash prefix. + if not path.startswith("/"): + path = "/" + path + if params is None: + params = self.make_name("params") + if query is None: + query = self.make_name("query") + if fragment is None: + fragment = self.make_name("fragment") + return urlparse.ParseResult( + scheme, netloc, path, params, query, fragment) + + def make_url( + self, scheme=None, netloc=None, path=None, params=None, + query=None, fragment=None): + """Generate a random URL. + + Contains randomly generated values for all parts of a URL: scheme, + location, path, parameters, query, and fragment. However, each part + can be overridden individually. + + :return: string + """ + return self.make_parsed_url( + scheme, netloc, path, params, query, fragment).geturl() + + def make_simple_http_url(self, netloc=None, path=None): + """Create an arbitrary HTTP URL with only a location and path.""" + return self.make_parsed_url( + scheme="http", netloc=netloc, path=path, params="", query="", + fragment="").geturl() + def make_names(self, *prefixes): """Generate random names. 
@@ -265,6 +465,19 @@ return tarball + def make_response(self, status_code, content, content_type=None): + """Return a similar response to that which `urllib2` returns.""" + if content_type is None: + headers_raw = b"" + else: + if isinstance(content_type, unicode): + content_type = content_type.encode("ascii") + headers_raw = b"Content-Type: %s" % content_type + headers = httplib.HTTPMessage(io.BytesIO(headers_raw)) + return urllib2.addinfourl( + fp=io.BytesIO(content), headers=headers, + url=None, code=status_code) + def make_streams(self, stdin=None, stdout=None, stderr=None): """Make a fake return value for a SSHClient.exec_command.""" # stdout.read() is called so stdout can't be None. @@ -273,6 +486,12 @@ return (stdin, stdout, stderr) + def make_CalledProcessError(self): + """Make a fake :py:class:`subprocess.CalledProcessError`.""" + return subprocess.CalledProcessError( + returncode=random.randint(1, 10), + cmd=[self.make_name("command")], + output=factory.make_bytes()) # Create factory singleton. factory = Factory() diff -Nru maas-1.5.4+bzr2294/src/maastesting/fixtures.py maas-1.7.6+bzr3376/src/maastesting/fixtures.py --- maas-1.5.4+bzr2294/src/maastesting/fixtures.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maastesting/fixtures.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Miscellaneous fixtures, here until they find a better home.""" @@ -13,6 +13,7 @@ __metaclass__ = type __all__ = [ + "CaptureStandardIO", "DisplayFixture", "LoggerSilencerFixture", "ProxiesDisabledFixture", @@ -20,6 +21,7 @@ "TempDirectory", ] +import io import logging import os from subprocess import ( @@ -33,6 +35,7 @@ from fixtures import ( EnvironmentVariableFixture, Fixture, + MonkeyPatch, ) from sst.actions import ( start, @@ -160,3 +163,24 @@ super(TempWDFixture, self).setUp() self.addCleanup(os.chdir, cwd) os.chdir(self.path) + + +class CaptureStandardIO(Fixture): + """Capture standard out/err. + + This is here to allow back-porting to 1.7; it exists in a more complete + form in MAAS >=1.8. + """ + + def setUp(self): + super(CaptureStandardIO, self).setUp() + self.stdout = io.StringIO() + self.useFixture(MonkeyPatch("sys.stdout", self.stdout)) + self.stderr = io.StringIO() + self.useFixture(MonkeyPatch("sys.stderr", self.stderr)) + + def getOutput(self): + return self.stdout.getvalue() + + def getError(self): + return self.stderr.getvalue() diff -Nru maas-1.5.4+bzr2294/src/maastesting/__init__.py maas-1.7.6+bzr3376/src/maastesting/__init__.py --- maas-1.5.4+bzr2294/src/maastesting/__init__.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maastesting/__init__.py 2015-07-10 01:27:14.000000000 +0000 @@ -17,6 +17,7 @@ "root", ] +import copy from os.path import ( abspath, dirname, @@ -28,6 +29,8 @@ from sys import executable from warnings import filterwarnings +import mock + # The root of the source tree. root = abspath(join(dirname(realpath(__file__)), pardir, pardir)) @@ -52,3 +55,14 @@ filterwarnings('default', category=BytesWarning, module=packages_expr) filterwarnings('default', category=DeprecationWarning, module=packages_expr) filterwarnings('default', category=ImportWarning, module=packages_expr) + +# Make sure that sentinel objects are not copied. 
+sentinel_type = type(mock.sentinel.foo) +copy._copy_dispatch[sentinel_type] = copy._copy_immutable +copy._deepcopy_dispatch[sentinel_type] = copy._copy_immutable + +try: + import maasfascist + maasfascist # Silence lint. +except ImportError: + pass diff -Nru maas-1.5.4+bzr2294/src/maastesting/matchers.py maas-1.7.6+bzr3376/src/maastesting/matchers.py --- maas-1.5.4+bzr2294/src/maastesting/matchers.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maastesting/matchers.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """testtools custom matchers""" @@ -13,7 +13,11 @@ __metaclass__ = type __all__ = [ + 'HasAttribute', 'IsCallable', + 'IsCallableMock', + 'IsFiredDeferred', + 'IsUnfiredDeferred', 'MockAnyCall', 'MockCalledOnceWith', 'MockCalledWith', @@ -22,7 +26,8 @@ 'Provides', ] -from mock import Mock +from functools import partial + from testtools.matchers import ( AfterPreprocessing, Annotate, @@ -32,8 +37,10 @@ Matcher, MatchesAll, MatchesPredicate, + MatchesStructure, Mismatch, ) +from twisted.internet import defer class IsCallable(Matcher): @@ -55,6 +62,46 @@ iface.providedBy, "%%r does not provide %s" % iface.getName()) +class HasAttribute(Matcher): + """Match if the given attribute is available.""" + + def __init__(self, attribute): + super(HasAttribute, self).__init__() + self.attribute = attribute + + def match(self, something): + try: + getattr(something, self.attribute) + except AttributeError: + return Mismatch( + "%r does not have a %r attribute" % ( + something, self.attribute)) + + def __str__(self): + return "%s(%r)" % (self.__class__.__name__, self.attribute) + + +class IsCallableMock(Matcher): + """Match if the subject looks like a mock that's callable. 
+ + `mock.create_autospec` can return objects like functions and modules that + are also callable mocks, but we can't use a simple ``isinstance`` test to + ascertain that. Here we assume the presence of ``return_value`` and + ``side_effect`` attributes means that we've found a callable mock. These + attributes are defined in `mock.CallableMixin`. + """ + + def match(self, something): + return MatchesAll( + HasAttribute("return_value"), + HasAttribute("side_effect"), + IsCallable(), + ).match(something) + + def __str__(self): + return self.__class__.__name__ + + def get_mock_calls(mock): """Return a list of all calls made to the given `mock`. @@ -88,10 +135,14 @@ class MockCalledOnceWith(MockCalledWith): - """Matches if the matchee Mock was called once with the provided args. + """Matches if the matchee `Mock` was called once, with the provided args. - Use of Mock.assert_called_once_with is discouraged as it passes if you typo - the function name. + To pass the match, the mock must have been called exactly once, and with + the given arguments. Use `mock.ANY` for any parameters whose values don't + matter for the match. + + Use this instead of `Mock.assert_called_once_with`, which just always + passes blindly if you mis-spell the name. 
""" def match(self, mock): @@ -141,7 +192,7 @@ def match(self, mock): matcher = MatchesAll( - IsInstance(Mock), + IsCallableMock(), Annotate( "calls do not match", AfterPreprocessing( @@ -164,7 +215,7 @@ def match(self, mock): matcher = MatchesAll( - IsInstance(Mock), + IsCallableMock(), Annotate( "mock has been called", AfterPreprocessing( @@ -174,3 +225,48 @@ first_only=True, ) return matcher.match(mock) + + +class IsFiredDeferred(Matcher): + """Matches if the subject is a fired `Deferred`.""" + + def __str__(self): + return self.__class__.__name__ + + def match(self, thing): + if not isinstance(thing, defer.Deferred): + return Mismatch("%r is not a Deferred" % (thing,)) + if not thing.called: + return Mismatch("%r has not been called" % (thing,)) + return None + + +class IsUnfiredDeferred(Matcher): + """Matches if the subject is an unfired `Deferred`.""" + + def __str__(self): + return self.__class__.__name__ + + def match(self, thing): + if not isinstance(thing, defer.Deferred): + return Mismatch("%r is not a Deferred" % (thing,)) + if thing.called: + return Mismatch( + "%r has been called (result=%r)" % (thing, thing.result)) + return None + + +class MatchesPartialCall(Matcher): + + def __init__(self, func, *args, **keywords): + super(MatchesPartialCall, self).__init__() + self.expected = partial(func, *args, **keywords) + + def match(self, observed): + matcher = MatchesAll( + IsInstance(partial), + MatchesStructure.fromExample( + self.expected, "func", "args", "keywords"), + first_only=True, + ) + return matcher.match(observed) diff -Nru maas-1.5.4+bzr2294/src/maastesting/testcase.py maas-1.7.6+bzr3376/src/maastesting/testcase.py --- maas-1.5.4+bzr2294/src/maastesting/testcase.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maastesting/testcase.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,12 +14,15 @@ __metaclass__ = type __all__ = [ 'MAASTestCase', + 'MAASTwistedRunTest', ] from contextlib import contextmanager import doctest +import types 
import unittest +from maastesting.crochet import EventualResultCatchingMixin from maastesting.factory import factory from maastesting.fixtures import TempDirectory from maastesting.scenarios import WithScenarios @@ -28,7 +31,9 @@ from nose.tools import nottest import testresources import testtools +from testtools import deferredruntest import testtools.matchers +from twisted.internet import defer @nottest @@ -53,7 +58,10 @@ yield -class MAASTestCase(WithScenarios, testtools.TestCase): +class MAASTestCase( + WithScenarios, + EventualResultCatchingMixin, + testtools.TestCase): """Base `TestCase` for MAAS. Supports `test resources`_, `test scenarios`_, and `fixtures`_. @@ -164,3 +172,48 @@ value = mock.MagicMock() super(MAASTestCase, self).patch(obj, attribute, value) return value + + def patch_autospec(self, obj, attribute, spec_set=False, instance=False): + """Patch `obj.attribute` with an auto-spec of itself. + + See `mock.create_autospec` and `patch`. + + :return: The patched-in object. + """ + spec = getattr(obj, attribute) + value = mock.create_autospec(spec, spec_set, instance) + super(MAASTestCase, self).patch(obj, attribute, value) + return value + + +class InvalidTest(Exception): + """Signifies that the test is invalid; it's not a good test.""" + + +class MAASTwistedRunTest(deferredruntest.AsynchronousDeferredRunTest): + """A specialisation of testtools' `AsynchronousDeferredRunTest`. + + It catches a common problem when writing tests for Twisted: forgetting to + decorate a test with `inlineCallbacks` that needs it. + """ + + def _check_for_generator(self, result): + if isinstance(result, types.GeneratorType): + raise InvalidTest( + "Test returned a generator. Should it be " + "decorated with inlineCallbacks?") + else: + return result + + def _run_user(self, function, *args): + """Override testtools' `_run_user`. + + `_run_user` is used in testtools for running functions in the test + case that may or may not return a `Deferred`. 
Here we also check for + generators, a good sign that a test case (or `setUp`, or `tearDown`) + is yielding without `inlineCallbacks` to support it. + """ + d = defer.maybeDeferred(function, *args) + d.addCallback(self._check_for_generator) + d.addErrback(self._got_user_failure) + return d diff -Nru maas-1.5.4+bzr2294/src/maastesting/tests/test_celery.py maas-1.7.6+bzr3376/src/maastesting/tests/test_celery.py --- maas-1.5.4+bzr2294/src/maastesting/tests/test_celery.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maastesting/tests/test_celery.py 2015-07-10 01:27:14.000000000 +0000 @@ -16,7 +16,6 @@ import random -from celery import current_app from celery.decorators import task from celery.result import EagerResult from maastesting.celery import CeleryFixture @@ -40,10 +39,6 @@ super(TestCeleryFixture, self).setUp() self.celery = self.useFixture(CeleryFixture()) - def test_celery_config(self): - self.assertTrue(current_app.conf.CELERY_ALWAYS_EAGER) - self.assertTrue(current_app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS) - def test_celery_eagerresult_contains_result(self): # The result is an instance of EagerResult and it contains the actual # result. diff -Nru maas-1.5.4+bzr2294/src/maastesting/tests/test_factory.py maas-1.7.6+bzr3376/src/maastesting/tests/test_factory.py --- maas-1.5.4+bzr2294/src/maastesting/tests/test_factory.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maastesting/tests/test_factory.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,5 @@ # Copyright 2012-2014 Canonical Ltd. This software is licensed under the + # GNU Affero General Public License version 3 (see the file LICENSE). """Test the factory where appropriate. 
Don't overdo this.""" @@ -17,11 +18,16 @@ from datetime import datetime from itertools import count import os.path +import random from random import randint import subprocess -from maastesting.factory import factory +from maastesting.factory import ( + factory, + TooManyRandomRetries, + ) from maastesting.testcase import MAASTestCase +from maastesting.utils import FakeRandInt from netaddr import ( IPAddress, IPNetwork, @@ -38,50 +44,115 @@ class TestFactory(MAASTestCase): - def test_getRandomString_respects_size(self): + def test_make_string_respects_size(self): sizes = [1, 10, 100] - random_strings = [factory.getRandomString(size) for size in sizes] + random_strings = [factory.make_string(size) for size in sizes] self.assertEqual(sizes, [len(string) for string in random_strings]) - def test_getRandomBoolean_returns_bool(self): - self.assertIsInstance(factory.getRandomBoolean(), bool) + def test_pick_bool_returns_bool(self): + self.assertIsInstance(factory.pick_bool(), bool) - def test_getRandomPort_returns_int(self): - self.assertIsInstance(factory.getRandomPort(), int) + def test_pick_port_returns_int(self): + self.assertIsInstance(factory.pick_port(), int) - def test_getRandomIPAddress(self): - ip_address = factory.getRandomIPAddress() + def test_make_vlan_tag_excludes_None_by_default(self): + # Artificially limit randint to a very narrow range, to guarantee + # some repetition in its output, and virtually guarantee that we test + # both outcomes of the flip-a-coin call in make_vlan_tag. 
+ self.patch(random, 'randint', FakeRandInt(random.randint, 0, 1)) + outcomes = {factory.make_vlan_tag() for _ in range(1000)} + self.assertEqual({1}, outcomes) + + def test_make_vlan_tag_includes_None_if_allow_none(self): + self.patch(random, 'randint', FakeRandInt(random.randint, 0, 1)) + self.assertEqual( + {None, 1}, + { + factory.make_vlan_tag(allow_none=True) + for _ in range(1000) + }) + + def test_make_ipv4_address(self): + ip_address = factory.make_ipv4_address() self.assertIsInstance(ip_address, unicode) octets = ip_address.split('.') self.assertEqual(4, len(octets)) for octet in octets: self.assertTrue(0 <= int(octet) <= 255) - def test_getRandomIPAddress_but_not(self): + def test_make_ipv4_address_but_not(self): # We want to look for clashes between identical IPs and/or netmasks. # Narrow down the range of randomness so we have a decent chance of # triggering a clash, but not so far that we'll loop for very long # trying to find a network we haven't seen already. self.patch( - factory, 'getRandomIPAddress', + factory, 'make_ipv4_address', lambda: '10.%d.0.0' % randint(1, 200)) networks = [] for _ in range(100): - networks.append(factory.getRandomNetwork(but_not=networks)) + networks.append(factory.make_ipv4_network(but_not=networks)) self.assertEquals(len(networks), len(set(networks))) - def test_getRandomUUID(self): - uuid = factory.getRandomUUID() + def test_make_UUID(self): + uuid = factory.make_UUID() self.assertIsInstance(uuid, unicode) self.assertEqual(36, len(uuid)) - def test_getRandomNetwork(self): - network = factory.getRandomNetwork() + def test_make_ipv4_network(self): + network = factory.make_ipv4_network() self.assertIsInstance(network, IPNetwork) - def test_getRandomIPInNetwork(self): - network = factory.getRandomNetwork() - ip = factory.getRandomIPInNetwork(network) + def test_make_ipv4_network_respects_but_not(self): + self.patch(factory, 'make_ipv4_address').return_value = IPAddress( + '10.1.1.0') + self.assertRaises( + 
TooManyRandomRetries, + factory.make_ipv4_network, + slash=24, but_not=[IPNetwork('10.1.1.0/24')]) + + def test_make_ipv4_network_returns_network_not_in_but_not(self): + self.patch(factory, 'make_ipv4_address').return_value = IPAddress( + '10.1.1.0') + self.assertEqual( + IPNetwork('10.1.1.0/24'), + factory.make_ipv4_network( + slash=24, but_not=[IPNetwork('10.9.9.0/24')])) + + def test_make_ipv4_network_may_overlap_but_not(self): + self.patch(factory, 'make_ipv4_address').return_value = IPAddress( + '10.1.1.0') + self.assertEqual( + IPNetwork('10.1.1.0/24'), + factory.make_ipv4_network( + slash=24, but_not=[IPNetwork('10.1.0.0/16')])) + + def test_make_ipv4_network_avoids_network_in_disjoint_from(self): + self.patch(factory, 'make_ipv4_address').return_value = IPAddress( + '10.1.1.0') + self.assertRaises( + TooManyRandomRetries, + factory.make_ipv4_network, + slash=24, disjoint_from=[IPNetwork('10.1.1.0/24')]) + + def test_make_ipv4_network_avoids_network_overlapping_disjoint_from(self): + self.patch(factory, 'make_ipv4_address').return_value = IPAddress( + '10.1.1.0') + self.assertRaises( + TooManyRandomRetries, + factory.make_ipv4_network, + slash=24, disjoint_from=[IPNetwork('10.1.0.0/16')]) + + def test_make_ipv4_network_returns_network_disjoint_from(self): + existing_network = factory.make_ipv4_network() + new_network = factory.make_ipv4_network( + disjoint_from=[existing_network]) + self.assertNotEqual(existing_network, new_network) + self.assertNotIn(new_network, existing_network) + self.assertNotIn(existing_network, new_network) + + def test_pick_ip_in_network(self): + network = factory.make_ipv4_network() + ip = factory.pick_ip_in_network(network) self.assertTrue( network.first <= IPAddress(ip).value <= network.last) @@ -92,7 +163,7 @@ self.assertLess(low, high) def test_make_ip_range_obeys_network(self): - network = factory.getRandomNetwork() + network = factory.make_ipv4_network() low, high = factory.make_ip_range(network) self.assertIn(low, network) 
self.assertIn(high, network) @@ -100,31 +171,31 @@ def test_make_ip_range_returns_low_and_high(self): # Make a very very small network, to maximise the chances of exposure # if the method gets this wrong e.g. by returning identical addresses. - low, high = factory.make_ip_range(factory.getRandomNetwork(slash=31)) + low, high = factory.make_ip_range(factory.make_ipv4_network(slash=31)) self.assertLess(low, high) def test_make_ip_range_obeys_but_not(self): # Make a very very small network, to maximise the chances of exposure # if the method gets this wrong. - network = factory.getRandomNetwork(slash=30) + network = factory.make_ipv4_network(slash=30) first_low, first_high = factory.make_ip_range(network) second_low, second_high = factory.make_ip_range( network, but_not=(first_low, first_high)) self.assertNotEqual((first_low, first_high), (second_low, second_high)) - def test_getRandomDate_returns_datetime(self): - self.assertIsInstance(factory.getRandomDate(), datetime) + def test_make_date_returns_datetime(self): + self.assertIsInstance(factory.make_date(), datetime) - def test_getRandomMACAddress(self): - mac_address = factory.getRandomMACAddress() + def test_make_mac_address(self): + mac_address = factory.make_mac_address() self.assertIsInstance(mac_address, unicode) self.assertEqual(17, len(mac_address)) for hex_octet in mac_address.split(":"): self.assertTrue(0 <= int(hex_octet, 16) <= 255) - def test_getRandomMACAddress_alternative_delimiter(self): + def test_make_mac_address_alternative_delimiter(self): self.patch(factory, "random_octets", count(0x3a)) - mac_address = factory.getRandomMACAddress(delimiter="-") + mac_address = factory.make_mac_address(delimiter="-") self.assertEqual("3a-3b-3c-3d-3e-3f", mac_address) def test_make_random_leases_maps_ips_to_macs(self): @@ -156,7 +227,7 @@ self.assertThat(factory.make_file(self.make_dir()), FileExists()) def test_make_file_writes_contents(self): - contents = factory.getRandomString().encode('ascii') + contents = 
factory.make_string().encode('ascii') self.assertThat( factory.make_file(self.make_dir(), contents=contents), FileContains(contents)) @@ -167,14 +238,14 @@ self.assertNotEqual('', contents) def test_make_file_uses_given_name(self): - name = factory.getRandomString() + name = factory.make_string() self.assertEqual( name, os.path.basename(factory.make_file(self.make_dir(), name=name))) def test_make_file_uses_given_dir(self): directory = self.make_dir() - name = factory.getRandomString() + name = factory.make_string() self.assertEqual( (directory, name), os.path.split(factory.make_file(directory, name=name))) @@ -197,7 +268,7 @@ def test_make_name_uses_configurable_separator(self): sep = 'SEPARATOR' - prefix = factory.getRandomString(3) + prefix = factory.make_string(3) self.assertThat( factory.make_name(prefix, sep=sep), StartsWith(prefix + sep)) @@ -221,7 +292,7 @@ def test_make_tarball_writes_tarball(self): filename = factory.make_name() - contents = {filename: factory.getRandomString()} + contents = {filename: factory.make_string()} tarball = factory.make_tarball(self.make_dir(), contents) diff -Nru maas-1.5.4+bzr2294/src/maastesting/tests/test_lint.py maas-1.7.6+bzr3376/src/maastesting/tests/test_lint.py --- maas-1.5.4+bzr2294/src/maastesting/tests/test_lint.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maastesting/tests/test_lint.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,17 +14,102 @@ __metaclass__ = type __all__ = [] -from subprocess import check_call -from unittest import skip +from difflib import unified_diff +from itertools import ifilter +from os import ( + mkdir, + walk, + ) +from os.path import ( + join, + relpath, + ) +from pipes import quote +from shutil import ( + copy2, + rmtree, + ) +from subprocess import ( + PIPE, + Popen, + STDOUT, + ) +from tempfile import mkdtemp from maastesting import root from maastesting.testcase import MAASTestCase +from testtools.content import ( + Content, + UTF8_TEXT, + ) class 
TestLint(MAASTestCase): - @skip( - "XXX: GavinPanella 2014-01-09 bug=1267472: " - "This needs altering once the new package structure is in place.") + def execute(self, *command): + process = Popen(command, stdout=PIPE, stderr=STDOUT) + output, _ = process.communicate() + if len(output) != 0: + name = "stdout/err from `%s`" % " ".join(map(quote, command)) + self.addDetail(name, Content(UTF8_TEXT, lambda: [output])) + self.assertEqual(0, process.wait(), "(return code is not zero)") + def test_that_there_is_no_lint_in_the_tree(self): - check_call(("make", "-C", root, "lint")) + self.execute("make", "--quiet", "-C", root, "lint") + + def test_that_imports_are_formatted(self): + # We're going to export all Python source code to a new, freshly + # created, tree, then run `make format` in it. + root_export = mkdtemp(prefix=".export.", dir=root) + self.addCleanup(rmtree, root_export, ignore_errors=True) + + # Useful predicates. + p_visible = lambda name: not name.startswith(".") + p_is_python = lambda name: name.endswith(".py") + + # Copy all visible Python source files over. + for dirpath, dirnames, filenames in walk(root): + dirnames[:] = ifilter(p_visible, dirnames) + dirpath_export = join(root_export, relpath(dirpath, start=root)) + for dirname in dirnames: + mkdir(join(dirpath_export, dirname)) + for filename in ifilter(p_visible, filenames): + if p_is_python(filename): + src = join(dirpath, filename) + dst = join(dirpath_export, filename) + copy2(src, dst) + + # We'll need the Makefile and format-imports too. + copy2(join(root, "Makefile"), root_export) + copy2( + join(root, "utilities", "format-imports"), + join(root_export, "utilities", "format-imports")) + + # Format imports in the exported tree. + self.execute("make", "--quiet", "-C", root_export, "format") + + # This will record a unified diff between the original source code and + # the reformatted source code, should there be any. 
+ diff = [] + + # For each file in the export, compare it to its counterpart in the + # original tree. + for dirpath, dirnames, filenames in walk(root_export): + dirpath_relative = relpath(dirpath, start=root_export) + dirpath_original = join(root, dirpath_relative) + for filename in ifilter(p_is_python, filenames): + filepath_original = join(dirpath_original, filename) + with open(filepath_original, "rb") as file_original: + file_lines_original = file_original.readlines() + filepath_formatted = join(dirpath, filename) + with open(filepath_formatted, "rb") as file_formatted: + file_lines_formatted = file_formatted.readlines() + diff.extend(unified_diff( + file_lines_original, file_lines_formatted, + filepath_original, filepath_formatted)) + + if len(diff) != 0: + self.addDetail("diff", Content(UTF8_TEXT, lambda: diff)) + self.fail( + "Some imports are not formatted; see the diff for the " + "missing changes. Use `make format` to address them.") diff -Nru maas-1.5.4+bzr2294/src/maastesting/tests/test_matchers.py maas-1.7.6+bzr3376/src/maastesting/tests/test_matchers.py --- maas-1.5.4+bzr2294/src/maastesting/tests/test_matchers.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maastesting/tests/test_matchers.py 2015-07-10 01:27:14.000000000 +0000 @@ -15,8 +15,13 @@ __all__ = [] from maastesting import matchers +from maastesting.factory import factory from maastesting.matchers import ( + HasAttribute, IsCallable, + IsCallableMock, + IsFiredDeferred, + IsUnfiredDeferred, MockAnyCall, MockCalledOnceWith, MockCalledWith, @@ -26,13 +31,16 @@ from maastesting.testcase import MAASTestCase from mock import ( call, + create_autospec, Mock, + NonCallableMock, sentinel, ) from testtools.matchers import ( MatchesStructure, Mismatch, ) +from twisted.internet import defer class TestIsCallable(MAASTestCase): @@ -222,3 +230,93 @@ def test_has_useful_string_representation(self): matcher = MockNotCalled() self.assertEqual("MockNotCalled", matcher.__str__()) + + +class 
TestHasAttribute(MAASTestCase, MockTestMixin): + + def test__returns_none_if_attribute_exists(self): + attribute = factory.make_string(3, prefix="attr") + setattr(self, attribute, factory.make_name("value")) + matcher = HasAttribute(attribute) + result = matcher.match(self) + self.assertIsNone(result) + + def test__returns_mismatch_if_attribute_does_not_exist(self): + attribute = factory.make_string(3, prefix="attr") + matcher = HasAttribute(attribute) + result = matcher.match(self) + self.assertMismatch( + result, " does not have a %r attribute" % attribute) + + +class TestIsCallableMock(MAASTestCase, MockTestMixin): + + def test__returns_none_when_its_a_callable_mock(self): + mock = Mock() + matcher = IsCallableMock() + result = matcher.match(mock) + self.assertIsNone(result) + + def test__returns_none_when_its_a_callable_autospec(self): + mock = create_autospec(lambda: None) + matcher = IsCallableMock() + result = matcher.match(mock) + self.assertIsNone(result) + + def test__returns_mismatch_when_its_a_non_callable_mock(self): + mock = NonCallableMock() + matcher = IsCallableMock() + result = matcher.match(mock) + self.assertMismatch( + result, " is not callable") + + def test__returns_mismatch_when_its_a_non_callable_autospec(self): + mock = create_autospec(None) + matcher = IsCallableMock() + result = matcher.match(mock) + self.assertMismatch( + result, " is not callable") + + def test__returns_mismatch_when_its_a_non_callable_object(self): + matcher = IsCallableMock() + result = matcher.match(object()) + self.assertMismatch( + result, " is not callable") + + +class TestIsFiredDeferred(MAASTestCase, MockTestMixin): + + def test__matches_fired_deferred(self): + d = defer.Deferred() + d.callback(None) + self.assertThat(d, IsFiredDeferred()) + + def test__does_not_match_unfired_deferred(self): + d = defer.Deferred() + self.assertMismatch( + IsFiredDeferred().match(d), + " has not been called") + + def test__does_not_match_non_deferred(self): + 
self.assertMismatch( + IsFiredDeferred().match(object()), + " is not a Deferred") + + +class TestIsUnfiredDeferred(MAASTestCase, MockTestMixin): + + def test__matches_unfired_deferred(self): + d = defer.Deferred() + self.assertThat(d, IsUnfiredDeferred()) + + def test__does_not_match_fired_deferred(self): + d = defer.Deferred() + d.callback(None) + self.assertMismatch( + IsUnfiredDeferred().match(d), + " has been called (result=None)") + + def test__does_not_match_non_deferred(self): + self.assertMismatch( + IsUnfiredDeferred().match(object()), + " is not a Deferred") diff -Nru maas-1.5.4+bzr2294/src/maastesting/tests/test_testcase.py maas-1.7.6+bzr3376/src/maastesting/tests/test_testcase.py --- maas-1.5.4+bzr2294/src/maastesting/tests/test_testcase.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maastesting/tests/test_testcase.py 2015-07-10 01:27:14.000000000 +0000 @@ -19,8 +19,18 @@ from tempfile import mkdtemp from maastesting.factory import factory +from maastesting.matchers import ( + IsCallableMock, + MockCalledOnceWith, + MockCallsMatch, + ) from maastesting.testcase import MAASTestCase -from mock import MagicMock +from mock import ( + call, + MagicMock, + sentinel, + ) +import mock as mock_module from testtools.matchers import ( DirExists, FileExists, @@ -61,3 +71,43 @@ attribute = self.patch(self, attribute_name) self.assertIs(getattr(self, attribute_name), attribute) self.assertIsInstance(attribute, MagicMock) + + def method_to_be_patched(self, a, b): + return sentinel.method_to_be_patched + + def test_patch_autospec_creates_autospec_from_target(self): + # Grab a reference to this now. + method_to_be_patched = self.method_to_be_patched + + # It's simpler to test that create_autospec has been called than it is + # to test the result of calling it; mock does some clever things to do + # what it does that make comparisons hard. 
+ create_autospec = self.patch(mock_module, "create_autospec") + create_autospec.return_value = sentinel.autospec + + method_to_be_patched_autospec = self.patch_autospec( + self, "method_to_be_patched", spec_set=sentinel.spec_set, + instance=sentinel.instance) + + self.assertIs(sentinel.autospec, method_to_be_patched_autospec) + self.assertIs(sentinel.autospec, self.method_to_be_patched) + self.assertThat( + create_autospec, MockCalledOnceWith( + method_to_be_patched, sentinel.spec_set, sentinel.instance)) + + def test_patch_autospec_really_leaves_an_autospec_behind(self): + self.patch_autospec(self, "method_to_be_patched") + # The patched method is now a callable mock. + self.assertThat(self.method_to_be_patched, IsCallableMock()) + # The patched method can be called with positional or keyword + # arguments. + self.method_to_be_patched(1, 2) + self.method_to_be_patched(3, b=4) + self.method_to_be_patched(a=5, b=6) + self.assertThat(self.method_to_be_patched, MockCallsMatch( + call(1, 2), call(3, b=4), call(a=5, b=6))) + # Calling the patched method with unrecognised arguments or not + # enough arguments results in an exception. + self.assertRaises(TypeError, self.method_to_be_patched, c=7) + self.assertRaises(TypeError, self.method_to_be_patched, 8) + self.assertRaises(TypeError, self.method_to_be_patched, b=9) diff -Nru maas-1.5.4+bzr2294/src/maastesting/utils.py maas-1.7.6+bzr3376/src/maastesting/utils.py --- maas-1.5.4+bzr2294/src/maastesting/utils.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/maastesting/utils.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Testing utilities.""" @@ -17,8 +17,8 @@ "content_from_file", "extract_word_list", "get_write_time", + "FakeRandInt", "preexec_fn", - "retries", "run_isolated", "sample_binary_data", ] @@ -31,10 +31,6 @@ stderr, stdout, ) -from time import ( - sleep, - time, - ) from traceback import print_exc import subunit @@ -85,24 +81,6 @@ signal.signal(signal.SIGPIPE, signal.SIG_DFL) -def retries(timeout=30, delay=1): - """Helper for retrying something, sleeping between attempts. - - Yields ``(elapsed, remaining)`` tuples, giving times in seconds. - - @param timeout: From now, how long to keep iterating, in seconds. - @param delay: The sleep between each iteration, in seconds. - """ - start = time() - end = start + timeout - for now in iter(time, None): - if now < end: - yield now - start, end - now - sleep(min(delay, end - now)) - else: - break - - def run_isolated(cls, self, result): """Run a test suite or case in a subprocess. @@ -153,3 +131,25 @@ # (1) Provided, of course, that man know only about ASCII and # UTF. sample_binary_data = codecs.BOM64_LE + codecs.BOM64_BE + b'\x00\xff\x00' + + +class FakeRandInt: + """Fake `randint` with forced limitations on its range. + + This lets you set a forced minimum, and/or a forced maximum, on the range + of any call. For example, if you pass `forced_maximum=3`, then a call + will never return more than 3. If you don't set a maximum, or if the + call's maximum argument is less than the forced maximum, then the call's + maximum will be respected. 
+ """ + def __init__(self, real_randint, forced_minimum=None, forced_maximum=None): + self.real_randint = real_randint + self.minimum = forced_minimum + self.maximum = forced_maximum + + def __call__(self, minimum, maximum): + if self.minimum is not None: + minimum = max(minimum, self.minimum) + if self.maximum is not None: + maximum = min(maximum, self.maximum) + return self.real_randint(minimum, maximum) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/address.py maas-1.7.6+bzr3376/src/metadataserver/address.py --- maas-1.5.4+bzr2294/src/metadataserver/address.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/address.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Figure out server address for the maas_url setting.""" @@ -13,17 +13,16 @@ __metaclass__ = type __all__ = [ - 'guess_server_address', + 'guess_server_host', ] -from fcntl import ioctl from os import environ import re import socket -import struct from subprocess import check_output from metadataserver import logger +from provisioningserver.utils.network import get_all_addresses_for_interface # fcntl operation as defined in . This is GNU/Linux-specific! SIOCGIFADDR = 0x8915 @@ -76,23 +75,35 @@ def get_ip_address(interface): - """Get the IP address for a given network interface.""" - # Apparently the netifaces module would do this for us. - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - interface_name = struct.pack(b'256s', interface[:15]) + """Get the first IP address for a given network interface. + + :return: An `IPAddress` instance for the first IP address on the + interface. If the interface has both IPv4 and IPv6 addresses, + the v4 address will be preferred. 
Otherwise the returned address + will be the first result of a sort on the set of addresses on + the interface. + """ try: - info = ioctl(s.fileno(), SIOCGIFADDR, interface_name) - except IOError as e: + # get_all_addresses_for_interface yields IPAddress instances. + # When sorted, IPAddress guarantees that IPv4 addresses will + # sort before IPv6, so we just return the first address that + # we've found. + all_addresses = sorted(get_all_addresses_for_interface(interface)) + return all_addresses[0] + except Exception as e: logger.warn( - "Could not determine address for apparent default interface %s " - "(%s)" + "Could not determine address for apparent default interface " + "%s (%s)" % (interface, e)) return None - return socket.inet_ntoa(info[20:24]) -def guess_server_address(): - """Make a guess as to this server's IP address.""" +def guess_server_host(): + """Make a guess as to this server's IP address or hostname. + + :return: IP address or hostname. + :rtype: unicode + """ ip_route_output = get_command_output( '/bin/ip', '-oneline', 'route', 'show') interface = find_default_interface(ip_route_output) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/api.py maas-1.7.6+bzr3376/src/metadataserver/api.py --- maas-1.5.4+bzr2294/src/metadataserver/api.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/api.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Metadata API.""" @@ -28,12 +28,12 @@ from django.core.exceptions import PermissionDenied from django.http import HttpResponse from django.shortcuts import get_object_or_404 -from maasserver.api import store_node_power_parameters -from maasserver.api_support import ( +from maasserver.api.nodes import store_node_power_parameters +from maasserver.api.support import ( operation, OperationsHandler, ) -from maasserver.api_utils import ( +from maasserver.api.utils import ( extract_oauth_key, get_mandatory_param, ) @@ -50,7 +50,9 @@ MACAddress, Node, SSHKey, + SSLKey, ) +from maasserver.models.event import Event from maasserver.models.tag import Tag from maasserver.populate_tags import populate_tags_for_single_node from maasserver.preseed import ( @@ -62,18 +64,26 @@ from maasserver.utils import find_nodegroup from maasserver.utils.orm import get_one from metadataserver import logger -from metadataserver.enum import COMMISSIONING_STATUS +from metadataserver.enum import ( + RESULT_TYPE, + SIGNAL_STATUS, + ) from metadataserver.fields import Bin from metadataserver.models import ( CommissioningScript, - NodeCommissionResult, NodeKey, + NodeResult, NodeUserData, ) from metadataserver.models.commissioningscript import ( - BUILTIN_COMMISSIONING_SCRIPTS, + get_builtin_commissioning_scripts, ) +from metadataserver.user_data import poweroff from piston.utils import rc +from provisioningserver.events import ( + EVENT_DETAILS, + EVENT_TYPES, + ) class UnknownMetadataVersion(MAASAPINotFound): @@ -168,20 +178,32 @@ create = update = delete = None fields = ('maas-commissioning-scripts', 'meta-data', 'user-data') - # States in which a node is allowed to signal commissioning status. - # (Only in Commissioning state, however, will it have any effect.) + # States in which a node is allowed to signal + # commissioning/installing status. + # (Only in Commissioning/Deploying state, however, + # will it have any effect.) 
signalable_states = [ + NODE_STATUS.BROKEN, NODE_STATUS.COMMISSIONING, + NODE_STATUS.FAILED_COMMISSIONING, + NODE_STATUS.DEPLOYING, + NODE_STATUS.FAILED_DEPLOYMENT, NODE_STATUS.READY, - NODE_STATUS.FAILED_TESTS, + NODE_STATUS.DISK_ERASING, ] + effective_signalable_states = [ + NODE_STATUS.COMMISSIONING, + NODE_STATUS.DEPLOYING, + NODE_STATUS.DISK_ERASING, + ] + # Statuses that a commissioning node may signal, and the respective # state transitions that they trigger on the node. signaling_statuses = { - COMMISSIONING_STATUS.OK: NODE_STATUS.READY, - COMMISSIONING_STATUS.FAILED: NODE_STATUS.FAILED_TESTS, - COMMISSIONING_STATUS.WORKING: None, + SIGNAL_STATUS.OK: NODE_STATUS.READY, + SIGNAL_STATUS.FAILED: NODE_STATUS.FAILED_COMMISSIONING, + SIGNAL_STATUS.WORKING: None, } def read(self, request, version, mac=None): @@ -195,59 +217,103 @@ shown_fields.remove('user-data') return make_list_response(sorted(shown_fields)) + def _store_installation_results(self, node, request): + """Store installation result file for `node`.""" + for name, uploaded_file in request.FILES.items(): + raw_content = uploaded_file.read() + NodeResult.objects.store_data( + node, name, script_result=0, + result_type=RESULT_TYPE.INSTALLATION, data=Bin(raw_content)) + def _store_commissioning_results(self, node, request): """Store commissioning result files for `node`.""" script_result = int(request.POST.get('script_result', 0)) for name, uploaded_file in request.FILES.items(): raw_content = uploaded_file.read() - if name in BUILTIN_COMMISSIONING_SCRIPTS: - postprocess_hook = BUILTIN_COMMISSIONING_SCRIPTS[name]['hook'] + builtin_commissioning_scripts = ( + get_builtin_commissioning_scripts()) + if name in builtin_commissioning_scripts: + postprocess_hook = builtin_commissioning_scripts[name]['hook'] postprocess_hook( node=node, output=raw_content, exit_status=script_result) - NodeCommissionResult.objects.store_data( - node, name, script_result, Bin(raw_content)) + NodeResult.objects.store_data( + node, 
name, script_result, + result_type=RESULT_TYPE.COMMISSIONING, data=Bin(raw_content)) @operation(idempotent=False) def signal(self, request, version=None, mac=None): - """Signal commissioning status. + """Signal commissioning/installation status. - A commissioning node can call this to report progress of the - commissioning process to the metadata server. + A commissioning/installing node can call this to report progress of + the commissioning/installation process to the metadata server. - Calling this from a node that is not Commissioning, Ready, or - Failed Tests is an error. Signaling completion more than once is not - an error; all but the first successful call are ignored. - - :param status: A commissioning status code. This can be "OK" (to - signal that commissioning has completed successfully), or "FAILED" - (to signal failure), or "WORKING" (for progress reports). + Calling this from a node that is not Allocated, Commissioning, Ready, + or Failed Tests is an error. Signaling completion more than once is + not an error; all but the first successful call are ignored. + + :param status: A commissioning/installation status code. This can be + "OK" (to signal that commissioning/installation has completed + successfully), or "FAILED" (to signal failure), or "WORKING" (for + progress reports). :param script_result: If this call uploads files, this parameter must be provided and will be stored as the return value for the script which produced these files. - :param error: An optional error string. If given, this will be stored + :param error: An optional error string. If given, this will be stored (overwriting any previous error string), and displayed in the MAAS - UI. If not given, any previous error string will be cleared. + UI. If not given, any previous error string will be cleared. 
""" node = get_queried_node(request, for_mac=mac) status = get_mandatory_param(request.POST, 'status') if node.status not in self.signalable_states: raise NodeStateViolation( - "Node wasn't commissioning (status is %s)" + "Node wasn't commissioning/installing (status is %s)" % NODE_STATUS_CHOICES_DICT[node.status]) + # These statuses are acceptable for commissioning, disk erasing, + # and deploying. if status not in self.signaling_statuses: raise MAASAPIBadRequest( - "Unknown commissioning status: '%s'" % status) + "Unknown commissioning/installation status: '%s'" % status) - if node.status != NODE_STATUS.COMMISSIONING: - # Already registered. Nothing to be done. + if node.status not in self.effective_signalable_states: + # If commissioning, it is already registered. Nothing to be done. + # If it is installing, should be in deploying state. return rc.ALL_OK - self._store_commissioning_results(node, request) - store_node_power_parameters(node, request) + if node.status == NODE_STATUS.COMMISSIONING: + # Ensure that any IP addresses are forcefully released in case + # the host didn't bother doing that. No static IPs are assigned + # at this stage, so we just deal with the dynamic ones. + if status != SIGNAL_STATUS.WORKING: + node.delete_host_maps(set(node.dynamic_ip_addresses())) + self._store_commissioning_results(node, request) + # XXX 2014-10-21 newell, bug=1382075 + # Auto detection for IPMI tries to save power parameters + # for Moonshot. This causes issues if the node's power type + # is already MSCM as it uses SSH instead of IPMI. This fix + # is temporary as power parameters should not be overwritten + # during commissioning because MAAS already has knowledge to + # boot the node. + # See MP discussion bug=1389808, for further details on why + # we are using bug fix 1382075 here. 
+ if node.power_type != "mscm": + store_node_power_parameters(node, request) + node.stop_transition_monitor() + target_status = self.signaling_statuses.get(status) + elif node.status == NODE_STATUS.DEPLOYING: + self._store_installation_results(node, request) + if status == SIGNAL_STATUS.FAILED: + node.mark_failed("Failed to get installation result.") + target_status = None + elif node.status == NODE_STATUS.DISK_ERASING: + if status == SIGNAL_STATUS.OK: + # disk erasing complete, release node + node.release() + elif status == SIGNAL_STATUS.FAILED: + node.mark_failed("Failed to erase disks.") + target_status = None - target_status = self.signaling_statuses.get(status) if target_status in (None, node.status): # No status change. Nothing to be done. return rc.ALL_OK @@ -268,7 +334,7 @@ def netboot_off(self, request, version=None, mac=None): """Turn off netboot on the node. - A commissioning node can call this to turn off netbooting when + A deploying node can call this to turn off netbooting when it finishes installing itself. """ node = get_queried_node(request, for_mac=mac) @@ -286,7 +352,7 @@ class MetaDataHandler(VersionIndexHandler): """Meta-data listing for a given version.""" - fields = ('instance-id', 'local-hostname', 'public-keys') + fields = ('instance-id', 'local-hostname', 'public-keys', 'x509') def get_attribute_producer(self, item): """Return a callable to deliver a given metadata item. @@ -307,6 +373,7 @@ 'local-hostname': self.local_hostname, 'instance-id': self.instance_id, 'public-keys': self.public_keys, + 'x509': self.ssl_certs, } return producers[field] @@ -342,6 +409,11 @@ return make_list_response( SSHKey.objects.get_keys_for_user(user=node.owner)) + def ssl_certs(self, node, version, item): + """ Produce x509 certs attribute. 
""" + return make_list_response( + SSLKey.objects.get_keys_for_user(user=node.owner)) + class UserDataHandler(MetadataViewHandler): """User-data blob for a given version.""" @@ -350,9 +422,19 @@ check_version(version) node = get_queried_node(request, for_mac=mac) try: + # When a node is deploying, cloud-init's request + # for user-data is when MAAS hands the node + # off to a user. + if node.status == NODE_STATUS.DEPLOYING: + node.end_deployment() + # If this node is supposed to be powered off, serve the + # 'poweroff' userdata. + if node.get_boot_purpose() == 'poweroff': + user_data = poweroff.generate_user_data(node=node) + else: + user_data = NodeUserData.objects.get_user_data(node) return HttpResponse( - NodeUserData.objects.get_user_data(node), - mimetype='application/octet-stream') + user_data, mimetype='application/octet-stream') except NodeUserData.DoesNotExist: logger.info( "No user data registered for node named %s" % node.hostname) @@ -455,4 +537,15 @@ """ node = get_object_or_404(Node, system_id=system_id) node.set_netboot(False) + + # Build and register an event for "node installation finished". + # This is a best-guess. At the moment, netboot_off() only gets + # called when the node has finished installing, so it's an + # accurate predictor of the end of the install process. 
+ type_name = EVENT_TYPES.NODE_INSTALLATION_FINISHED + event_details = EVENT_DETAILS[type_name] + Event.objects.register_event_and_event_type( + node.system_id, type_name, type_level=event_details.level, + type_description=event_details.description, + event_description="Node disabled netboot") return rc.ALL_OK diff -Nru maas-1.5.4+bzr2294/src/metadataserver/commissioning/snippets.py maas-1.7.6+bzr3376/src/metadataserver/commissioning/snippets.py --- maas-1.5.4+bzr2294/src/metadataserver/commissioning/snippets.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/commissioning/snippets.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,82 +0,0 @@ -# Copyright 2012-2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Low-level routines for access to snippets. - -These are used by the user-data code, but also by `setup.py`. That's why -importing this must not pull in any unnecessary framework modules etc. 
-""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - 'list_snippets', - 'read_snippet', - 'strip_name', - 'get_snippet_context', - 'get_userdata_template_dir', - ] - -import os - -from provisioningserver.utils import ( - locate_config, - read_text_file, - ) - - -USERDATA_BASE_DIR = 'templates/commissioning-user-data' - - -def get_userdata_template_dir(): - """Return the absolute location of the userdata - template directory.""" - return locate_config(USERDATA_BASE_DIR) - - -def get_snippet_context(snippets_dir=None, encoding='utf-8'): - """Return the context of all of the snippets.""" - if snippets_dir is None: - snippets_dir = os.path.join(get_userdata_template_dir(), 'snippets') - snippets = { - strip_name(name): read_snippet(snippets_dir, name, encoding=encoding) - for name in list_snippets(snippets_dir) - } - return snippets - - -def read_snippet(snippets_dir, name, encoding='utf-8'): - """Read a snippet file. - - :rtype: `unicode` - """ - return read_text_file(os.path.join(snippets_dir, name), encoding=encoding) - - -def is_snippet(filename): - """Does `filename` represent a valid snippet name?""" - return all([ - not filename.startswith('.'), - filename != '__init__.py', - filename != 'tests', - not filename.endswith('.pyc'), - not filename.endswith('~'), - ]) - - -def list_snippets(snippets_dir): - """List names of available snippets.""" - return filter(is_snippet, os.listdir(snippets_dir)) - - -def strip_name(snippet_name): - """Canonicalize a snippet name.""" - # Dot suffixes do not work well in tempita variable names. 
- return snippet_name.replace('.', '_') diff -Nru maas-1.5.4+bzr2294/src/metadataserver/commissioning/tests/test_snippets.py maas-1.7.6+bzr3376/src/metadataserver/commissioning/tests/test_snippets.py --- maas-1.5.4+bzr2294/src/metadataserver/commissioning/tests/test_snippets.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/commissioning/tests/test_snippets.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,79 +0,0 @@ -# Copyright 2012-2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Test the snippets-related support routines for commissioning user data.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import os.path - -from maastesting.factory import factory -from maastesting.testcase import MAASTestCase -from metadataserver.commissioning.snippets import ( - get_snippet_context, - is_snippet, - list_snippets, - read_snippet, - strip_name, - ) - - -class TestSnippets(MAASTestCase): - - def test_read_snippet_reads_snippet_file(self): - contents = factory.getRandomString() - snippet = self.make_file(contents=contents) - self.assertEqual( - contents, - read_snippet(os.path.dirname(snippet), os.path.basename(snippet))) - - def test_strip_name_leaves_simple_names_intact(self): - simple_name = factory.getRandomString() - self.assertEqual(simple_name, strip_name(simple_name)) - - def test_strip_name_replaces_dots(self): - self.assertEqual('_x_y_', strip_name('.x.y.')) - - def test_is_snippet(self): - are_snippets = { - 'snippet': True, - 'with-dash': True, - 'module.py': True, - '.backup': False, - 'backup~': False, - 'module.pyc': False, - '__init__.pyc': False, - 'tests': False, - } - self.assertEqual( - are_snippets, - {name: is_snippet(name) for name in are_snippets}) - - def test_list_snippets(self): - snippets_dir = self.make_dir() - 
factory.make_file(snippets_dir, 'snippet') - factory.make_file(snippets_dir, '.backup.pyc') - self.assertItemsEqual(['snippet'], list_snippets(snippets_dir)) - - def test_get_snippet_context(self): - contents = factory.getRandomString() - snippets_dir = self.make_dir() - factory.make_file(snippets_dir, 'snippet.py', contents=contents) - self.assertItemsEqual( - {'snippet_py': contents}, - get_snippet_context(snippets_dir=snippets_dir)) - - def test_get_snippet_context_empty_if_no_snippets(self): - snippets_dir = self.make_dir() - context = {} - self.assertEqual( - context, get_snippet_context(snippets_dir=snippets_dir)) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/commissioning/tests/test_user_data.py maas-1.7.6+bzr3376/src/metadataserver/commissioning/tests/test_user_data.py --- maas-1.5.4+bzr2294/src/metadataserver/commissioning/tests/test_user_data.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/commissioning/tests/test_user_data.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,64 +0,0 @@ -# Copyright 2012-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Test generation of commissioning user data.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from maasserver.testing.factory import factory -from maasserver.testing.testcase import MAASServerTestCase -from maastesting.matchers import MockCalledWith -from metadataserver.commissioning import user_data -from metadataserver.commissioning.user_data import generate_user_data -from mock import ( - Mock, - sentinel, - ) -from testtools.matchers import ContainsAll - - -class TestUserData(MAASServerTestCase): - - def test_generate_user_data_produces_commissioning_script(self): - # generate_user_data produces a commissioning script which contains - # both definitions and use of various commands in python. 
- self.assertThat( - generate_user_data(), ContainsAll({ - 'maas-get', - 'maas-signal', - 'maas-ipmi-autodetect', - 'def authenticate_headers', - 'def encode_multipart_data', - })) - - def test_nodegroup_passed_to_get_preseed_context(self): - # I don't care about what effect it has, I just want to know - # that it was passed as it can affect the contents of - # `server_host` in the context. - fake_context = dict(http_proxy=factory.getRandomString()) - user_data.get_preseed_context = Mock(return_value=fake_context) - nodegroup = sentinel.nodegroup - generate_user_data(nodegroup) - self.assertThat( - user_data.get_preseed_context, - MockCalledWith(nodegroup=nodegroup)) - - def test_generate_user_data_generates_mime_multipart(self): - # The generate_user_data func should create a MIME multipart - # message consisting of cloud-config and x-shellscript - # attachments. - self.assertThat( - generate_user_data(), ContainsAll({ - 'multipart', - 'Content-Type: text/cloud-config', - 'Content-Type: text/x-shellscript', - })) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/commissioning/user_data.py maas-1.7.6+bzr3376/src/metadataserver/commissioning/user_data.py --- maas-1.5.4+bzr2294/src/metadataserver/commissioning/user_data.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/commissioning/user_data.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,87 +0,0 @@ -# Copyright 2012-2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Generate commissioning user-data from template and code snippets. - -This combines the `user_data.template` and the snippets of code in the -`snippets` directory into the main commissioning script. - -Its contents are not customizable. To inject custom code, use the -:class:`CommissioningScript` model. 
-""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - 'generate_user_data', - ] - -from email.mime.multipart import MIMEMultipart -from email.mime.text import MIMEText -import os.path - -from maasserver.preseed import get_preseed_context -from metadataserver.commissioning.snippets import ( - get_snippet_context, - get_userdata_template_dir, - ) -import tempita - - -ENCODING = 'utf-8' - - -def generate_user_data(nodegroup=None): - """Produce the main commissioning script. - - The main template file contains references to so-called ``snippets'' - which are read in here, and substituted. In addition, the regular - preseed context variables are available (such as 'http_proxy'). - - The final result is a MIME multipart message that consists of a - 'cloud-config' part and an 'x-shellscript' part. This allows maximum - flexibility with cloud-init as we read in a template - 'user_data_config.template' to set cloud-init configs before the script - is run. - - :rtype: `bytes` - """ - commissioning_dir = get_userdata_template_dir() - userdata_template_file = os.path.join( - commissioning_dir, 'user_data.template') - config_template_file = os.path.join( - commissioning_dir, 'user_data_config.template') - userdata_template = tempita.Template.from_filename( - userdata_template_file, encoding=ENCODING) - config_template = tempita.Template.from_filename( - config_template_file, encoding=ENCODING) - # The preseed context is a dict containing various configs that the - # templates can use. - preseed_context = get_preseed_context(nodegroup=nodegroup) - - # Render the snippets in the main template. - snippets = get_snippet_context(encoding=ENCODING) - snippets.update(preseed_context) - userdata = userdata_template.substitute(snippets).encode(ENCODING) - - # Render the config. 
- config = config_template.substitute(preseed_context) - - # Create a MIME multipart message from the config and the userdata. - config_part = MIMEText(config, 'cloud-config', ENCODING) - config_part.add_header( - 'Content-Disposition', 'attachment; filename="config"') - data_part = MIMEText(userdata, 'x-shellscript', ENCODING) - data_part.add_header( - 'Content-Disposition', 'attachment; filename="user_data.sh"') - combined = MIMEMultipart() - combined.attach(config_part) - combined.attach(data_part) - return combined.as_string() diff -Nru maas-1.5.4+bzr2294/src/metadataserver/deployment/maas_configure_interfaces.py maas-1.7.6+bzr3376/src/metadataserver/deployment/maas_configure_interfaces.py --- maas-1.5.4+bzr2294/src/metadataserver/deployment/maas_configure_interfaces.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/deployment/maas_configure_interfaces.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,350 @@ +#!/usr/bin/env python2.7 +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Configure a MAAS node's network interfaces. + +This script is meant to be run on a MAAS node, either during installation or +after the installed operating system has booted. It figures out for itself +whether the installed operating system is one where it knows how to configure +the network. If not, it will log a warning and exit without error, so that +it can be run on any operating system regardless of whether it is supported. + +At the moment, the script does only one job: on Ubuntu systems it writes +`/etc/network/` configuration to set static IPv6 addresses and gateways for +network interfaces that connect to IPv6 networks. 
+""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type + +from argparse import ArgumentParser +from errno import ENOENT +from logging import getLogger +from os import ( + listdir, + rename, + ) +import os.path +from random import randint +from subprocess import check_call + + +logger = getLogger('maas_configure_interfaces') + + +class BadArgument(Exception): + """Incorrect or malformed argument.""" + + +def normalise_mac(mac): + """Normalise the "spelling" of a MAC address.""" + return mac.lower().strip() + + +def split_ip_mac_pair(argument_string): + """Turn an `IP=MAC` string into a tuple `(IP, MAC)`.""" + parts = argument_string.split('=', 1) + if len(parts) != 2: + raise BadArgument("Not a valid IP=MAC pair: '%s'." % argument_string) + [ip, mac] = parts + return ip, normalise_mac(mac) + + +def prepare_parser(): + """Set up an arguments parser with this script's options.""" + parser = ArgumentParser(description=__doc__) + parser.add_argument( + '--static-ip', '-s', type=split_ip_mac_pair, action='append', + default=[], + help=( + "Set a given static IP address on the network interface that has " + "a given MAC address. Pass an IP=MAC address pair, separated by " + "an equals sign. This option may be used multiple times.")) + parser.add_argument( + '--gateway', '-g', type=split_ip_mac_pair, action='append', + default=[], + help=( + "Set a router IP address for the network interface that has a " + "given MAC address. Pass an IP=MAC address pair, separated by " + "an equals sign. 
This option may be used multiple times.")) + parser.add_argument( + '--config-dir', '-d', default='/etc/network', + help=( + "Location where this script can write configuration snippets " + "for network interfaces.")) + parser.add_argument( + '--update-interfaces', '-u', action='store_true', + help="Update /etc/network/interfaces to use the generated config.") + parser.add_argument( + '--restart-interfaces', '-r', action='store_true', + help="Restart network interfaces after configuring.") + parser.add_argument( + '--name-interfaces', '-n', action='store_true', + help=( + "Set fixed names for network interfaces, so that each will keep " + "its name across reboots. On Linux systems this writes udev " + "rules to prevent interfaces from switching names arbitrarily.")) + return parser + + +def read_file(path): + """Return the contents of the file at `path`.""" + with open(path, 'rb') as infile: + return infile.read() + + +def map_interfaces_by_mac(): + """Map network interfaces' MAC addresses to interface names. + + The input to this function is the system's current networking + configuration. It does not attempt to parse configuration files. + + :return: A dict mapping each normalised MAC address to a list of network + interfaces that have that MAC address. + """ + # Query network interfaces based on /sys/class/net/. This script runs in a + # very limited environment, so pulling in external dependencies like the + # netifaces package is undesirable. + interfaces = listdir('/sys/class/net') + macs_to_interfaces = {} + for interface in interfaces: + try: + mac = read_file( + os.path.join('/sys/class/net', interface, 'address')) + except IOError as e: + # Tolerate file-not-found errors, to absorb any variability in + # the /sys/class/net API that doesn't concern us. 
+            if e.errno != ENOENT:
+                raise
+        else:
+            mac = normalise_mac(mac)
+            macs_to_interfaces.setdefault(mac, [])
+            macs_to_interfaces[mac].append(interface)
+
+    return macs_to_interfaces
+
+
+def map_addresses_by_interface(interfaces_by_mac, ip_mac_pairs):
+    """Map network interfaces to IP addresses.
+
+    Use this to transform a sequence of IP/MAC pairs as given on the command
+    line into a mapping from network interface names to lists of IP addresses.
+
+    :param interfaces_by_mac: A dict as produced by `map_interfaces_by_mac`.
+    :param ip_mac_pairs: An iterable of IP/MAC address pairs, as tuples.
+        The MAC addresses must be normalised.
+    :return: A dict mapping interface names to the lists of IP addresses that
+        correspond to those interfaces' MAC addresses.
+    """
+    mapping = {}
+    for ip, mac in ip_mac_pairs:
+        interfaces = interfaces_by_mac.get(mac, [])
+        for interface in interfaces:
+            mapping.setdefault(interface, [])
+            mapping[interface].append(ip)
+    return mapping
+
+
+def compose_config_stanza(interface, ips, gateways):
+    """Return a configuration stanza for a given network interface.
+
+    :param interface: Network interface name.
+    :param ips: A list of IPv6 addresses.
+    :param gateways: A list of router IP addresses.
+    :return: Text of an `/etc/network/interfaces.d` configuration stanza.
+    """
+    return '\n'.join(
+        ["iface %s inet6 static" % interface] +
+        ["\tnetmask 64"] +
+        ["\taddress %s" % ip for ip in ips] +
+        ["\tgateway %s" % gateway for gateway in gateways])
+
+
+def compose_config_file(interfaces_by_mac, addresses_by_interface,
+                        gateways_by_interface):
+    """Return a network interfaces configuration file.
+
+    :param interfaces_by_mac: Dict mapping MAC addresses to lists of
+        interfaces that have those addresses.
+    :param addresses_by_interface: Dict mapping interface names to lists
+        of static IP addresses that should be assigned to those
+        interfaces.
+    :param gateways_by_interface: Dict mapping interface names to gateways.
+    :return: Text of an `/etc/network/interfaces.d` snippet.
+    """
+    stanzas = '\n\n'.join(
+        compose_config_stanza(
+            interface, ips, gateways_by_interface.get(interface, []))
+        for interface, ips in addresses_by_interface.items())
+    return (
+        "# MAAS-generated static interface configurations.\n\n%s\n"
+        % stanzas)
+
+
+# Name of the MAAS-generated network interfaces config file.
+MAAS_INTERFACES_CONFIG = 'maas-config'
+
+
+def locate_maas_config(config_dir):
+    """Return the location of the MAAS interfaces config file."""
+    return os.path.join(config_dir, 'interfaces.d', MAAS_INTERFACES_CONFIG)
+
+
+def write_file(path, text, encoding='utf-8'):
+    """Atomically write `text` to file at `path`."""
+    content = text.encode(encoding)
+    temp_file = path + '.new-%d' % randint(10000, 99999)
+    with open(temp_file, 'wb') as outfile:
+        outfile.write(content)
+    rename(temp_file, path)
+
+
+def configure_static_addresses(config_dir, ip_mac_pairs, gateway_mac_pairs,
+                               interfaces_by_mac):
+    """Write interfaces config file for static addresses.
+
+    :param config_dir: Location of interfaces config directory.
+    :param ip_mac_pairs: List of pairs, each of a static IP address and the
+        MAC address for the network interface that should have that address.
+    :param gateway_mac_pairs: List of pairs, each of a gateway address and a
+        MAC address for the network interface that should use that gateway.
+    :param interfaces_by_mac: Dict mapping each MAC address on the system to
+        a list of network interfaces that have that MAC address.
+    :return: The list of affected network interfaces.
+    """
+    if not os.path.isfile(os.path.join(config_dir, 'interfaces')):
+        logger.warn(
+            "Not supported yet: This does not look like a "
+            "Debian/Ubuntu system. 
Not configuring networking.") + return [] + addresses_by_interface = map_addresses_by_interface( + interfaces_by_mac, ip_mac_pairs) + gateways_by_interface = map_addresses_by_interface( + interfaces_by_mac, gateway_mac_pairs) + interfaces_file = locate_maas_config(config_dir) + config = compose_config_file( + addresses_by_interface, addresses_by_interface, gateways_by_interface) + write_file(interfaces_file, config) + return sorted(addresses_by_interface.keys()) + + +def update_interfaces_file(config_dir): + """Update `/etc/network/interfaces` to include our generated config. + + This is likely to fail when not running as root. + + If the file already mentions the MAAS config file, this function assumes + that it was updated with the MAAS config in mind, and won't touch it. + + :param config_dir: Location for `/etc/network`. The MAAS config will have + been written to its `interfaces.d`; the `interfaces` config file in the + same directory will be rewritten. + """ + interfaces_file = os.path.join(config_dir, 'interfaces') + with open(interfaces_file, 'rb') as infile: + interfaces_config = infile.read().decode('utf-8') + if MAAS_INTERFACES_CONFIG not in interfaces_config: + new_config = "%s\n\nsource interfaces.d/%s\n" % ( + interfaces_config, + MAAS_INTERFACES_CONFIG, + ) + write_file(interfaces_file, new_config) + + +def restart_interfaces(interfaces): + """Restart each of the given network interfaces. + + Call this after updating the systems network configuration if you want the + new configuration to take effect right away. 
+ """ + for interface in interfaces: + check_call(['ifdown', interface]) + check_call(['ifup', interface]) + + +def compose_udev_equality(key, value): + """Return a udev comparison clause, like `ACTION=="add"`.""" + assert key == key.upper() + return '%s=="%s"' % (key, value) + + +def compose_udev_attr_equality(attribute, value): + """Return a udev attribute comparison clause, like `ATTR{type}=="1"`.""" + assert attribute == attribute.lower() + return 'ATTR{%s}=="%s"' % (attribute, value) + + +def compose_udev_setting(key, value): + """Return a udev assignment clause, like `NAME="eth0"`.""" + assert key == key.upper() + return '%s="%s"' % (key, value) + + +def generate_udev_rule(interface, mac): + """Return a udev rule to set the name of network interface with `mac`. + + The rule ends up as a single line looking something like: + + SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", + ATTR{address}="ff:ee:dd:cc:bb:aa", NAME="eth0" + """ + rule = ', '.join([ + compose_udev_equality('SUBSYSTEM', 'net'), + compose_udev_equality('ACTION', 'add'), + compose_udev_equality('DRIVERS', '?*'), + compose_udev_attr_equality('address', mac), + compose_udev_setting('NAME', interface), + ]) + return '%s\n' % rule + + +def name_interfaces(interfaces_by_mac): + """Configure fixed names for network interfaces.""" + # Write to the same udev file that would otherwise be automatically + # populated with similar rules on the node's first boot. This provides + # the least surprise to the node's user, who would expect to see the + # rules in its standard location. + # + # (We can't wait for the rules to be written on first boot, because by + # then two or more network interfaces might have switched names.) + # + # Writing the rules to a file with a sequence number below 70 would also + # work, but a higher one does not. 
+ udev_file = '/etc/udev/rules.d/70-persistent-net.rules' + header = "# MAAS-generated fixed names for network interfaces.\n\n" + udev_content = header + '\n\n'.join( + generate_udev_rule(interface, mac) + for mac, interfaces in interfaces_by_mac.items() + for interface in interfaces + if mac != '00:00:00:00:00:00') + try: + write_file(udev_file, udev_content) + except IOError as e: + if e.errno == ENOENT: + logger.warn( + "No udev rules directory. Not naming network interfaces.") + else: + raise + + +if __name__ == "__main__": + args = prepare_parser().parse_args() + interfaces_by_mac = map_interfaces_by_mac() + configured_interfaces = configure_static_addresses( + args.config_dir, ip_mac_pairs=args.static_ip, + gateway_mac_pairs=args.gateway, interfaces_by_mac=interfaces_by_mac) + if len(configured_interfaces) > 0: + if args.update_interfaces: + update_interfaces_file(args.config_dir) + if args.restart_interfaces: + restart_interfaces(configured_interfaces) + if args.name_interfaces: + name_interfaces(interfaces_by_mac) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/deployment/tests/test_maas_configure_interfaces.py maas-1.7.6+bzr3376/src/metadataserver/deployment/tests/test_maas_configure_interfaces.py --- maas-1.5.4+bzr2294/src/metadataserver/deployment/tests/test_maas_configure_interfaces.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/deployment/tests/test_maas_configure_interfaces.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,541 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for the node-side `configure-interfaces` script.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from argparse import ArgumentParser +from errno import ( + EACCES, + ENOENT, + ) +from os import ( + makedirs, + remove, + ) +import os.path +from random import randint +from textwrap import dedent + +from maastesting.factory import factory +from maastesting.matchers import ( + MockCalledOnceWith, + MockCallsMatch, + MockNotCalled, + ) +from maastesting.testcase import MAASTestCase +import metadataserver.deployment.maas_configure_interfaces as script +from mock import ( + ANY, + call, + ) +from testtools.matchers import ( + ContainsAll, + Equals, + FileContains, + FileExists, + ) + + +class TestPrepareParser(MAASTestCase): + + def test__returns_parser(self): + self.assertIsInstance(script.prepare_parser(), ArgumentParser) + + def test__accepts_empty_command_line(self): + parser = script.prepare_parser() + self.assertIsNotNone(parser.parse_args([])) + + def test__accepts_typical_command_line(self): + parser = script.prepare_parser() + config_dir = factory.make_name('etc-network') + ip = factory.make_ipv6_address() + gateway = factory.make_ipv6_address() + mac = factory.make_mac_address() + args = parser.parse_args([ + '--config-dir=%s' % config_dir, + '--update-interfaces', + '--restart-interfaces', + '--static-ip=%s=%s' % (ip, mac), + '--gateway=%s=%s' % (gateway, mac), + '--name-interfaces', + ]) + self.expectThat(args.config_dir, Equals(config_dir)) + self.expectThat(args.static_ip, Equals([(ip, mac)])) + self.expectThat(args.gateway, Equals([(gateway, mac)])) + self.assertTrue(args.update_interfaces) + self.assertTrue(args.restart_interfaces) + self.assertTrue(args.name_interfaces) + + def test__leaves_dangerous_options_off_by_default(self): + defaults = script.prepare_parser().parse_args([]) + self.assertFalse(defaults.update_interfaces) + 
self.assertFalse(defaults.restart_interfaces) + self.assertFalse(defaults.name_interfaces) + + def test__parses_multiple_ip_mac_pairs(self): + parser = script.prepare_parser() + pairs = [ + (factory.make_ipv6_address(), factory.make_mac_address()) + for _ in range(randint(2, 4)) + ] + args = ['--static-ip=%s=%s' % pair for pair in pairs] + self.assertItemsEqual(pairs, parser.parse_args(args).static_ip) + + def test__checks_for_obviously_malformed_ip_mac_pairs(self): + ip = factory.make_ipv6_address() + mac = factory.make_mac_address() + parser = script.prepare_parser() + self.assertRaises( + script.BadArgument, + parser.parse_args, ['--static-ip', '%s+%s' % (ip, mac)]) + + +def make_denormalised_mac(): + """Create a MAC address that is not in its normalised form.""" + return " %s " % factory.make_mac_address().upper() + + +class TestSplitIPPair(MAASTestCase): + + def test__splits_ip_mac_pairs(self): + ip = factory.make_ipv6_address() + mac = factory.make_mac_address() + self.assertEqual( + (ip, mac), + script.split_ip_mac_pair('%s=%s' % (ip, mac))) + + def test__normalises_macs(self): + ip = factory.make_ipv6_address() + mac = make_denormalised_mac() + self.assertNotEqual(script.normalise_mac(mac), mac) + self.assertEqual( + (ip, script.normalise_mac(mac)), + script.split_ip_mac_pair('%s=%s' % (ip, mac))) + + +class TestNormaliseMAC(MAASTestCase): + + def test__normalises(self): + mac = factory.make_mac_address() + self.assertEqual( + script.normalise_mac(mac.lower()), + script.normalise_mac(mac.upper())) + + def test__is_idempotent(self): + mac = factory.make_mac_address() + self.assertEqual( + script.normalise_mac(mac), + script.normalise_mac(script.normalise_mac(mac))) + + def test__strips_whitespace(self): + mac = factory.make_mac_address() + self.assertEqual(mac, script.normalise_mac(' %s\n' % mac)) + + +class TestMapInterfacesByMAC(MAASTestCase): + + def patch_listdir(self, listings): + """Replace `os.listdir` with a fake that returns the given listings. 
+ + :param listings: A dict mapping directory paths to their contents. + """ + self.patch(script, 'listdir', listings.__getitem__) + + def patch_read_file(self, files): + """Replace `read_file` with a fake that returns the given files. + + :param files: A dict mapping file paths to their contents. + """ + self.patch(script, 'read_file', files.__getitem__) + + def test__parses_realistic_output(self): + self.patch_listdir( + {'/sys/class/net': ['eth0', 'eth1', 'lo', 'virbr0']}) + self.patch_read_file({ + '/sys/class/net/eth0/address': b'cc:5d:2e:6a:e5:eb', + '/sys/class/net/eth1/address': b'00:1e:0b:a1:6c:7b', + '/sys/class/net/lo/address': b'00:00:00:00:00:00', + '/sys/class/net/virbr0/address': b'c6:13:5d:51:42:ca', + }) + expected_mapping = { + '00:00:00:00:00:00': ['lo'], + 'cc:5d:2e:6a:e5:eb': ['eth0'], + '00:1e:0b:a1:6c:7b': ['eth1'], + 'c6:13:5d:51:42:ca': ['virbr0'], + } + + self.assertEqual(expected_mapping, script.map_interfaces_by_mac()) + + def test__integrates_with_real_sys_class_net(self): + real_interfaces = script.map_interfaces_by_mac() + self.assertIsInstance(real_interfaces, dict) + self.assertNotEqual({}, real_interfaces) + + def test__normalises_macs(self): + interface = factory.make_name('eth') + mac = make_denormalised_mac() + self.assertNotEqual(script.normalise_mac(mac), mac) + self.patch_listdir({'/sys/class/net': [interface]}) + self.patch_read_file({'/sys/class/net/%s/address' % interface: mac}) + self.assertEqual( + [script.normalise_mac(mac)], + list(script.map_interfaces_by_mac().keys())) + + def test__ignores_interfaces_without_addresses(self): + interface = factory.make_name('eth') + self.patch_listdir({'/sys/class/net': [interface]}) + self.patch_autospec(script, 'read_file').side_effect = ( + IOError(ENOENT, "Deliberate error: No such file or directory.")) + self.assertEqual({}, script.map_interfaces_by_mac()) + + def test__propagates_other_IOErrors(self): + interface = factory.make_name('eth') + 
self.patch_listdir({'/sys/class/net': [interface]}) + self.patch_autospec(script, 'read_file').side_effect = ( + IOError(EACCES, "Deliberate error: Permission denied.")) + self.assertRaises(IOError, script.map_interfaces_by_mac) + + def test__propagates_other_exceptions(self): + class FakeError(Exception): + """Some other type of exception that the script isn't expecting.""" + + interface = factory.make_name('eth') + self.patch_listdir({'/sys/class/net': [interface]}) + self.patch_autospec(script, 'read_file').side_effect = FakeError() + self.assertRaises(FakeError, script.map_interfaces_by_mac) + + +class TestMapAddressesByInterface(MAASTestCase): + + def test__combines_mappings(self): + ip = factory.make_ipv6_address() + mac = factory.make_mac_address() + interface = factory.make_name('eth') + self.assertEqual( + {interface: [ip]}, + script.map_addresses_by_interface( + {mac: [interface]}, + [(ip, mac)])) + + def test__ignores_unknown_macs(self): + ip = factory.make_ipv6_address() + mac = factory.make_mac_address() + self.assertEqual( + {}, + script.map_addresses_by_interface({}, [(ip, mac)])) + + def test__ignores_unknown_interfaces(self): + ip = factory.make_ipv6_address() + mac = factory.make_mac_address() + self.assertEqual( + {}, + script.map_addresses_by_interface({mac: []}, [(ip, mac)])) + + def test__combines_addresses_per_interface(self): + ip1 = factory.make_ipv6_address() + ip2 = factory.make_ipv6_address() + mac = factory.make_mac_address() + interface = factory.make_name('eth') + mapping = script.map_addresses_by_interface( + {mac: [interface]}, + [(ip1, mac), (ip2, mac)]) + self.assertItemsEqual([ip1, ip2], mapping[interface]) + + +class TestComposeConfigStanza(MAASTestCase): + + def test__produces_interfaces_stanza(self): + ip = factory.make_ipv6_address() + interface = factory.make_name('eth') + expected = dedent("""\ + iface %s inet6 static + \tnetmask 64 + \taddress %s + """) % (interface, ip) + self.assertEqual( + expected.strip(), + 
script.compose_config_stanza(interface, [ip], []).strip()) + + def test__includes_all_given_addresses(self): + ips = [factory.make_ipv6_address() for _ in range(3)] + interface = factory.make_name('eth') + self.assertThat( + script.compose_config_stanza(interface, ips, []).strip(), + ContainsAll("address %s" % ip for ip in ips)) + + def test__includes_gateway_if_given(self): + ip = factory.make_ipv6_address() + interface = factory.make_name('eth') + gateway = factory.make_ipv6_address() + expected = dedent("""\ + iface %s inet6 static + \tnetmask 64 + \taddress %s + \tgateway %s + """) % (interface, ip, gateway) + self.assertEqual( + expected.strip(), + script.compose_config_stanza(interface, [ip], [gateway]).strip()) + + +class TestComposeConfigFile(MAASTestCase): + + def test__returns_config_file_text(self): + ip = factory.make_ipv6_address() + mac = factory.make_mac_address() + interface = factory.make_name('eth') + self.assertIn( + script.compose_config_stanza(interface, [ip], []), + script.compose_config_file( + {mac: [interface]}, {interface: [ip]}, {})) + + +class TestLocateMAASConfig(MAASTestCase): + + def test__returns_typical_location(self): + self.assertEqual( + '/etc/network/interfaces.d/maas-config', + script.locate_maas_config('/etc/network')) + + def test__obeys_config_dir(self): + config_dir = factory.make_name('etc-network') + self.assertEqual( + '%s/interfaces.d/maas-config' % config_dir, + script.locate_maas_config(config_dir)) + + +class TestWriteFile(MAASTestCase): + + def test__writes_file(self): + path = os.path.join(self.make_dir(), factory.make_name('file')) + content = factory.make_name('content') + script.write_file(path, content) + self.assertThat(path, FileContains(content)) + + def test__obeys_encoding(self): + path = os.path.join(self.make_dir(), factory.make_name('file')) + text = factory.make_name('\u0f00') + script.write_file(path, text, encoding='utf-16') + self.assertThat(path, FileContains(text.encode('utf-16'))) + + def 
test__replaces_existing_file(self): + path = self.make_file() + content = factory.make_name('new-content') + script.write_file(path, content) + self.assertThat(path, FileContains(content)) + + +class TestConfigureStaticAddresses(MAASTestCase): + + def make_config_dir(self, interfaces_content=''): + """Create an `/etc/network` lookalike directory. + + The directory will contain an `interfaces` file (with the given + contents), and an `interfaces.d` directory. + """ + config_dir = self.make_dir() + makedirs(os.path.join(config_dir, 'interfaces.d')) + factory.make_file(config_dir, 'interfaces', interfaces_content) + return config_dir + + def patch_write_file(self): + return self.patch_autospec(script, 'write_file') + + def test__skips_if_network_interfaces_does_not_exist(self): + config_dir = self.make_config_dir() + remove(os.path.join(config_dir, 'interfaces')) + write_file = self.patch_write_file() + result = script.configure_static_addresses( + config_dir, ip_mac_pairs=[], gateway_mac_pairs=[], + interfaces_by_mac={}) + self.expectThat(result, Equals([])) + self.expectThat(write_file, MockNotCalled()) + + def test__skips_if_config_dir_does_not_exist(self): + config_dir = os.path.join(self.make_dir(), factory.make_name('nondir')) + write_file = self.patch_write_file() + result = script.configure_static_addresses( + config_dir, ip_mac_pairs=[], gateway_mac_pairs=[], + interfaces_by_mac={}) + self.expectThat(result, Equals([])) + self.expectThat(write_file, MockNotCalled()) + + def test__writes_to_interfaces_d(self): + config_dir = self.make_config_dir() + script.configure_static_addresses( + config_dir, ip_mac_pairs=[], gateway_mac_pairs=[], + interfaces_by_mac={}) + self.assertThat( + os.path.join(config_dir, 'interfaces.d', 'maas-config'), + FileExists()) + + def test__writes_network_config(self): + write_file = self.patch_write_file() + ip = factory.make_ipv6_address() + mac = factory.make_mac_address() + interface = factory.make_name('eth') + config_dir = 
self.make_config_dir() + + script.configure_static_addresses( + config_dir, ip_mac_pairs=[(ip, mac)], gateway_mac_pairs=[], + interfaces_by_mac={mac: [interface]}) + + self.assertThat(write_file, MockCalledOnceWith(ANY, ANY)) + [mock_call] = write_file.mock_calls + _, args, _ = mock_call + _, content = args + self.assertIn("address %s" % ip, content) + + def test__returns_interfaces_with_addresses(self): + ip = factory.make_ipv6_address() + mac = factory.make_mac_address() + interface = factory.make_name('eth') + config_dir = self.make_config_dir() + self.patch_write_file() + self.assertEqual( + [interface], + script.configure_static_addresses( + config_dir, ip_mac_pairs=[(ip, mac)], gateway_mac_pairs=[], + interfaces_by_mac={mac: [interface]})) + + def test__ignores_interfaces_without_addresses(self): + ip = factory.make_ipv6_address() + mac = factory.make_mac_address() + config_dir = self.make_config_dir() + self.patch_write_file() + self.assertEqual( + [], + script.configure_static_addresses( + config_dir, ip_mac_pairs=[(ip, mac)], gateway_mac_pairs=[], + interfaces_by_mac={})) + + +class TestUpdateInterfacesFile(MAASTestCase): + + def test__adds_source_line(self): + old_content = factory.make_string() + interfaces_file = self.make_file('interfaces', old_content) + script.update_interfaces_file(os.path.dirname(interfaces_file)) + self.assertThat( + interfaces_file, + FileContains(dedent("""\ + %s + + source interfaces.d/maas-config + """ % old_content))) + + def test__skips_if_maas_config_already_mentioned(self): + old_content = dedent("""\ + %s + + source interfaces.d/maas-config + """) % factory.make_string() + interfaces_file = self.make_file('interfaces', old_content) + script.update_interfaces_file(os.path.dirname(interfaces_file)) + self.assertThat(interfaces_file, FileContains(old_content)) + + +class TestRestartInterfaces(MAASTestCase): + + def test__takes_interface_down_and_up(self): + interface = factory.make_name('eth') + check_call = 
self.patch(script, 'check_call') + script.restart_interfaces([interface]) + self.assertThat( + check_call, MockCallsMatch( + call(['ifdown', interface]), + call(['ifup', interface]), + )) + + +class TestGenerateUdevRule(MAASTestCase): + + def test__generates_udev_rule(self): + interface = factory.make_name('eth') + mac = factory.make_mac_address() + expected_rule = ( + 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ' + 'ATTR{address}=="%(mac)s", NAME="%(interface)s"' + ) % {'mac': mac, 'interface': interface} + self.assertEqual( + expected_rule, + script.generate_udev_rule(interface, mac).strip()) + + +class TestNameInterfaces(MAASTestCase): + + def patch_write_file(self): + return self.patch_autospec(script, 'write_file') + + def test__writes_udev_file(self): + interface = factory.make_name('eth') + mac = factory.make_mac_address() + write_file = self.patch_write_file() + script.name_interfaces({mac: [interface]}) + self.assertThat( + write_file, + MockCalledOnceWith( + '/etc/udev/rules.d/70-persistent-net.rules', ANY)) + + def test__writes_udev_rules(self): + interface = factory.make_name('eth') + mac = factory.make_mac_address() + write_file = self.patch_write_file() + script.name_interfaces({mac: [interface]}) + [call] = write_file.mock_calls + _, args, _ = call + _, content = args + self.assertIn('NAME="%s"' % interface, content) + + def test__skips_loopback_but_names_other_interfaces(self): + # Testing the loopback interface together with a "regular" network + # interface. Without the regular interface, a code change could + # accidentally cause the patched generate_udev_rule never to be called + # at all. If it weren't for the extra interface, the test wouldn't + # notice. 
+ self.patch_write_file() + lo_interface = 'lo' + lo_mac = '00:00:00:00:00:00' + proper_interface = factory.make_name('eth') + proper_mac = factory.make_mac_address() + generate_udev_rule = self.patch_autospec(script, 'generate_udev_rule') + generate_udev_rule.return_value = "(udev rule)" + script.name_interfaces( + { + lo_mac: [lo_interface], + proper_mac: [proper_interface], + }) + self.assertThat( + generate_udev_rule, + MockCalledOnceWith(proper_interface, proper_mac)) + + def test__skips_if_udev_rules_d_does_not_exist(self): + interface = factory.make_name('eth') + mac = factory.make_mac_address() + write_file = self.patch_write_file() + write_file.side_effect = IOError( + ENOENT, "Deliberate error: No such file or directory.") + # The exception occurs but does not get propagated. + script.name_interfaces({mac: [interface]}) + self.assertThat(write_file, MockCalledOnceWith(ANY, ANY)) + + def test__propagates_similar_but_different_errors_writing_file(self): + interface = factory.make_name('eth') + mac = factory.make_mac_address() + write_file = self.patch_write_file() + write_file.side_effect = IOError( + EACCES, "Deliberate error: Permission denied.") + self.assertRaises( + IOError, + script.name_interfaces, {mac: [interface]}) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/enum.py maas-1.7.6+bzr3376/src/metadataserver/enum.py --- maas-1.5.4+bzr2294/src/metadataserver/enum.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/enum.py 2015-07-10 01:27:14.000000000 +0000 @@ -13,13 +13,14 @@ __metaclass__ = type __all__ = [ - 'COMMISSIONING_STATUS', - 'COMMISSIONING_STATUS_CHOICES', + 'SIGNAL_STATUS', + 'SIGNAL_STATUS_CHOICES', + 'RESULT_TYPE', + 'RESULT_TYPE_CHOICES', ] -class COMMISSIONING_STATUS: - """The vocabulary of a commissioning script's possible statuses.""" +class SIGNAL_STATUS: DEFAULT = "OK" OK = "OK" @@ -27,8 +28,20 @@ WORKING = "WORKING" -COMMISSIONING_STATUS_CHOICES = ( - (COMMISSIONING_STATUS.OK, "OK"), - 
(COMMISSIONING_STATUS.FAILED, "FAILED"), - (COMMISSIONING_STATUS.WORKING, "WORKING"), +SIGNAL_STATUS_CHOICES = ( + (SIGNAL_STATUS.OK, "OK"), + (SIGNAL_STATUS.FAILED, "FAILED"), + (SIGNAL_STATUS.WORKING, "WORKING"), +) + + +class RESULT_TYPE: + + COMMISSIONING = 0 + INSTALLATION = 1 + + +RESULT_TYPE_CHOICES = ( + (RESULT_TYPE.COMMISSIONING, "Commissioning"), + (RESULT_TYPE.INSTALLATION, "Installation"), ) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/fields.py maas-1.7.6+bzr3376/src/metadataserver/fields.py --- maas-1.5.4+bzr2294/src/metadataserver/fields.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/fields.py 2015-07-10 01:27:14.000000000 +0000 @@ -44,7 +44,7 @@ my_model_object.binary_data = Bin(b"\x01\x02\x03") """ - def __init__(self, initializer): + def __new__(cls, initializer): """Wrap a bytes. :param initializer: Binary string of data for this Bin. This must @@ -52,10 +52,15 @@ this constructor will refuse to render None as b'None'. :type initializer: bytes """ + # We can't do this in __init__, because it passes its argument into + # the upcall. It ends up in object.__init__, which sometimes issues + # a DeprecationWarning because it doesn't want any arguments. + # Those warnings would sometimes make their way into logs, breaking + # tests that checked those logs. if not isinstance(initializer, bytes): raise AssertionError( "Not a binary string: '%s'" % repr(initializer)) - super(Bin, self).__init__(initializer) + return super(Bin, cls).__new__(cls, initializer) def __emittable__(self): """Emit base-64 encoded bytes. diff -Nru maas-1.5.4+bzr2294/src/metadataserver/__init__.py maas-1.7.6+bzr3376/src/metadataserver/__init__.py --- maas-1.5.4+bzr2294/src/metadataserver/__init__.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/__init__.py 2015-07-10 01:27:14.000000000 +0000 @@ -32,3 +32,10 @@ the model and will fail to generate schema migrations for it. 
""" app_label = 'metadataserver' + + +try: + import maasfascist + maasfascist # Silence lint. +except ImportError: + pass diff -Nru maas-1.5.4+bzr2294/src/metadataserver/migrations/0015_rename_nodecommissionresult_add_result_type.py maas-1.7.6+bzr3376/src/metadataserver/migrations/0015_rename_nodecommissionresult_add_result_type.py --- maas-1.5.4+bzr2294/src/metadataserver/migrations/0015_rename_nodecommissionresult_add_result_type.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/migrations/0015_rename_nodecommissionresult_add_result_type.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,179 @@ +from django.db import models +from metadataserver.enum import RESULT_TYPE +from south.db import db +# -*- coding: utf-8 -*- +from south.utils import datetime_utils as datetime +from south.v2 import SchemaMigration + + +class Migration(SchemaMigration): + + def forwards(self, orm): + # Adding field 'NodeCommissionResult.status' + db.add_column(u'metadataserver_nodecommissionresult', 'result_type', + self.gf('django.db.models.fields.IntegerField')(default=RESULT_TYPE.COMMISSIONING), + keep_default=False) + db.rename_table(u'metadataserver_nodecommissionresult', u'metadataserver_noderesult') + + def backwards(self, orm): + db.rename_table(u'metadataserver_noderesult', u'metadataserver_nodecommissionresult') + # Deleting field 'NodeCommissionResult.status' + db.delete_column(u'metadataserver_nodecommissionresult', 'result_type') + + + models = { + u'auth.group': { + 'Meta': {'object_name': 'Group'}, + u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), + 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) + }, + u'auth.permission': { + 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 
'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, + 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), + u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) + }, + u'auth.user': { + 'Meta': {'object_name': 'User'}, + 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), + 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), + u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), + 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), + 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), + 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) + }, + u'contenttypes.contenttype': { + 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 
'ContentType', 'db_table': "'django_content_type'"}, + 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) + }, + u'maasserver.node': { + 'Meta': {'object_name': 'Node'}, + 'agent_name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'null': 'True', 'blank': 'True'}), + 'architecture': ('django.db.models.fields.CharField', [], {'max_length': '31'}), + 'boot_type': ('django.db.models.fields.CharField', [], {'default': "u'fastpath'", 'max_length': '20'}), + 'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), + 'created': ('django.db.models.fields.DateTimeField', [], {}), + 'distro_series': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '20', 'blank': 'True'}), + 'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), + 'error_description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}), + 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '255', 'blank': 'True'}), + u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'license_key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}), + 'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}), + 'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), + 'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}), + 'osystem': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '20', 'blank': 'True'}), + 'owner': 
('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}), + 'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}), + 'power_state': ('django.db.models.fields.CharField', [], {'default': "u'unknown'", 'max_length': '10'}), + 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), + 'routers': ('djorm_pgarray.fields.ArrayField', [], {'default': 'None', 'dbtype': "u'macaddr'", 'null': 'True', 'blank': 'True'}), + 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), + 'storage': ('django.db.models.fields.IntegerField', [], {'default': '0'}), + 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-8fa4490a-20c2-11e4-97b9-e82aea220bd2'", 'unique': 'True', 'max_length': '41'}), + 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}), + 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'null': 'True'}), + 'updated': ('django.db.models.fields.DateTimeField', [], {}), + 'zone': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Zone']", 'on_delete': 'models.SET_DEFAULT'}) + }, + u'maasserver.nodegroup': { + 'Meta': {'object_name': 'NodeGroup'}, + 'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), + 'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'unique': 'True'}), + 'cluster_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}), + 'created': ('django.db.models.fields.DateTimeField', [], {}), + 'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), + u'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), + 'maas_url': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), + 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), + 'updated': ('django.db.models.fields.DateTimeField', [], {}), + 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) + }, + u'maasserver.tag': { + 'Meta': {'object_name': 'Tag'}, + 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), + 'created': ('django.db.models.fields.DateTimeField', [], {}), + 'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}), + u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'kernel_opts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), + 'updated': ('django.db.models.fields.DateTimeField', [], {}) + }, + u'maasserver.zone': { + 'Meta': {'ordering': "[u'name']", 'object_name': 'Zone'}, + 'created': ('django.db.models.fields.DateTimeField', [], {}), + 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), + u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), + 'updated': ('django.db.models.fields.DateTimeField', [], {}) + }, + u'metadataserver.commissioningscript': { + 'Meta': {'object_name': 'CommissioningScript'}, + 'content': ('metadataserver.fields.BinaryField', [], {}), + u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) + }, + u'metadataserver.nodekey': { + 'Meta': {'object_name': 'NodeKey'}, + u'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), + 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), + 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}), + 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'unique': 'True'}) + }, + u'metadataserver.noderesult': { + 'Meta': {'unique_together': "((u'node', u'name'),)", 'object_name': 'NodeResult'}, + 'created': ('django.db.models.fields.DateTimeField', [], {}), + 'data': ('metadataserver.fields.BinaryField', [], {'default': "''", 'max_length': '1048576', 'blank': 'True'}), + u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), + 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}), + 'result_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), + 'script_result': ('django.db.models.fields.IntegerField', [], {}), + 'updated': ('django.db.models.fields.DateTimeField', [], {}) + }, + u'metadataserver.nodeuserdata': { + 'Meta': {'object_name': 'NodeUserData'}, + 'data': ('metadataserver.fields.BinaryField', [], {}), + u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}) + }, + u'piston.consumer': { + 'Meta': {'object_name': 'Consumer'}, + 'description': ('django.db.models.fields.TextField', [], {}), + u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), + 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), + 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), + 'user': 
('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': u"orm['auth.User']"}) + }, + u'piston.token': { + 'Meta': {'object_name': 'Token'}, + 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), + 'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Consumer']"}), + u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), + 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), + 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1407698073L'}), + 'token_type': ('django.db.models.fields.IntegerField', [], {}), + 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': u"orm['auth.User']"}), + 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) + } + } + + complete_apps = ['metadataserver'] diff -Nru maas-1.5.4+bzr2294/src/metadataserver/models/commissioningscript.py maas-1.7.6+bzr3376/src/metadataserver/models/commissioningscript.py --- maas-1.5.4+bzr2294/src/metadataserver/models/commissioningscript.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/models/commissioningscript.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,8 +14,8 @@ __metaclass__ = type __all__ = [ - 'BUILTIN_COMMISSIONING_SCRIPTS', 'CommissioningScript', + 'get_builtin_commissioning_scripts', 'inject_lldp_result', 'inject_lshw_result', 'inject_result', @@ -43,13 +43,15 @@ ) from lxml import etree from maasserver.fields import MAC +from maasserver.models import Config from maasserver.models.tag import Tag from 
metadataserver import DefaultMeta +from metadataserver.enum import RESULT_TYPE from metadataserver.fields import ( Bin, BinaryField, ) -from metadataserver.models.nodecommissionresult import NodeCommissionResult +from metadataserver.models.noderesult import NodeResult logger = logging.getLogger(__name__) @@ -137,9 +139,15 @@ div 1024 div 1024 """ +# Select 1234, or, +# failing that, 1234 +# that's not nested within a . _xpath_storage_bytes = """\ - sum(//node[@class='volume']/size[@units='bytes']) - div 1024 div 1024 + ( + //node[@class='disk'] | + //node[not(ancestor::node[@class='disk']) and @class='volume'] + ) + /size[@units='bytes'] div 1000 div 1000 """ @@ -213,6 +221,28 @@ node.system_id) +# Run `dhclient` on all the unconfigured interfaces. +# This is done to create records in the leases file for the +# NICs attached to unconfigured interfaces. This way the leases +# parser will be able to connect these NICs and the networks +# MAAS knows about. +def dhcp_explore(): + def get_iface_list(ifconfig_output): + return [ + line.split()[0] + for line in ifconfig_output.splitlines()[1:]] + + from subprocess import check_output, call + all_ifaces = get_iface_list(check_output(("ifconfig", "-s", "-a"))) + configured_ifaces = get_iface_list(check_output(("ifconfig", "-s"))) + unconfigured_ifaces = set(all_ifaces) - set(configured_ifaces) + for iface in sorted(unconfigured_ifaces): + # Run dhclient in the background to avoid blocking the commissioning. + call(["dhclient", "-nw", iface]) + # Ignore return value and continue running dhcplient on the + # other interfaces. + + # This function must be entirely self-contained. It must not use # variables or imports from the surrounding scope. 
def lldpd_install(config_file): @@ -323,6 +353,7 @@ LIST_MODALIASES_OUTPUT_NAME = '00-maas-04-list-modaliases.out' LIST_MODALIASES_SCRIPT = \ 'find /sys -name modalias -print0 | xargs -0 cat | sort -u' +DHCP_UNCONFIGURED_INTERFACES_NAME = '00-maas-05-dhcp-unconfigured-ifaces' def null_hook(node, output, exit_status): @@ -368,6 +399,10 @@ 'content': LIST_MODALIASES_SCRIPT.encode('ascii'), 'hook': null_hook, }, + DHCP_UNCONFIGURED_INTERFACES_NAME: { + 'content': make_function_call_script(dhcp_explore), + 'hook': null_hook, + }, '99-maas-01-wait-for-lldpd.out': { 'content': make_function_call_script( lldpd_wait, "/var/run/lldpd.socket", time_delay=60), @@ -397,6 +432,20 @@ add_names_to_scripts(BUILTIN_COMMISSIONING_SCRIPTS) +def get_builtin_commissioning_scripts(): + """Get the builtin commissioning scripts. + + The builtin scripts exposed may vary based on config settings. + """ + scripts = BUILTIN_COMMISSIONING_SCRIPTS.copy() + + config_key = 'enable_dhcp_discovery_on_unconfigured_interfaces' + if not Config.objects.get_config(config_key): + del scripts[DHCP_UNCONFIGURED_INTERFACES_NAME] + + return scripts + + def add_script_to_archive(tarball, name, content, mtime): """Add a commissioning script to an archive of commissioning scripts.""" assert isinstance(content, bytes), "Script content must be binary." @@ -414,7 +463,7 @@ """Utility for the collection of `CommissioningScript`s.""" def _iter_builtin_scripts(self): - for script in BUILTIN_COMMISSIONING_SCRIPTS.itervalues(): + for script in get_builtin_commissioning_scripts().itervalues(): yield script['name'], script['content'] def _iter_user_scripts(self): @@ -459,15 +508,17 @@ def inject_result(node, name, output, exit_status=0): """Inject a `name` result and trigger related hooks, if any. - `output` and `exit_status` are recorded as `NodeCommissionResult` + `output` and `exit_status` are recorded as `NodeResult` instances with the `name` given. A built-in hook is then searched for; if found, it is invoked. 
""" assert isinstance(output, bytes) - NodeCommissionResult.objects.store_data( - node, name, script_result=exit_status, data=Bin(output)) - if name in BUILTIN_COMMISSIONING_SCRIPTS: - postprocess_hook = BUILTIN_COMMISSIONING_SCRIPTS[name]['hook'] + NodeResult.objects.store_data( + node, name, script_result=exit_status, + result_type=RESULT_TYPE.COMMISSIONING, data=Bin(output)) + builtin_commissioning_scripts = get_builtin_commissioning_scripts() + if name in builtin_commissioning_scripts: + postprocess_hook = builtin_commissioning_scripts[name]['hook'] postprocess_hook(node=node, output=output, exit_status=exit_status) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/models/__init__.py maas-1.7.6+bzr3376/src/metadataserver/models/__init__.py --- maas-1.5.4+bzr2294/src/metadataserver/models/__init__.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/models/__init__.py 2015-07-10 01:27:14.000000000 +0000 @@ -15,16 +15,16 @@ __metaclass__ = type __all__ = [ 'CommissioningScript', - 'NodeCommissionResult', + 'NodeResult', 'NodeKey', 'NodeUserData', ] from maasserver.utils import ignore_unused from metadataserver.models.commissioningscript import CommissioningScript -from metadataserver.models.nodecommissionresult import NodeCommissionResult from metadataserver.models.nodekey import NodeKey +from metadataserver.models.noderesult import NodeResult from metadataserver.models.nodeuserdata import NodeUserData -ignore_unused(CommissioningScript, NodeCommissionResult, NodeKey, NodeUserData) +ignore_unused(CommissioningScript, NodeResult, NodeKey, NodeUserData) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/models/nodecommissionresult.py maas-1.7.6+bzr3376/src/metadataserver/models/nodecommissionresult.py --- maas-1.5.4+bzr2294/src/metadataserver/models/nodecommissionresult.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/models/nodecommissionresult.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,108 +0,0 @@ -# 
Copyright 2012-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -""":class:`NodeCommissionResult` model.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - 'NodeCommissionResult', - ] - - -from django.db.models import ( - CharField, - ForeignKey, - IntegerField, - Manager, - ) -from django.shortcuts import get_object_or_404 -from django.utils.html import escape -from maasserver.models.cleansave import CleanSave -from maasserver.models.timestampedmodel import TimestampedModel -from metadataserver import DefaultMeta -from metadataserver.fields import BinaryField - - -class NodeCommissionResultManager(Manager): - """Utility to manage a collection of :class:`NodeCommissionResult`s.""" - - def clear_results(self, node): - """Remove all existing results for a node.""" - self.filter(node=node).delete() - - def store_data(self, node, name, script_result, data): - """Store data about a node. - - :param node: The node that this result pertains to. - :type node: :class:`maasserver.models.Node` - - :param name: The name of this result, typically the name of - the commissioning script that generated it. - :type name: string - - :param script_result: The exit code of the commissioning - script. - :type script_result: int - - :param data: The raw binary output of the commissioning - script. 
- :type data: :class:`metadataserver.fields.Bin` - - """ - existing, created = self.get_or_create( - node=node, name=name, - defaults=dict(script_result=script_result, data=data)) - if not created: - existing.script_result = script_result - existing.data = data - existing.save() - return existing - - def get_data(self, node, name): - """Get data about a node.""" - ncr = get_object_or_404(NodeCommissionResult, node=node, name=name) - return ncr.data - - -class NodeCommissionResult(CleanSave, TimestampedModel): - """Storage for data returned from node commissioning. - - Commissioning a node results in various bits of data that need to be - stored, such as lshw output. This model allows storing of this data - as unicode text, with an arbitrary name, for later retrieval. - - :ivar node: The context :class:`Node`. - :ivar status: If this data results from the execution of a script, this - is the status of this execution. This can be "OK", "FAILED" or - "WORKING" for progress reports. - :ivar name: A unique name to use for the data being stored. - :ivar data: The file's actual data, unicode only. 
- """ - - class Meta(DefaultMeta): - unique_together = ('node', 'name') - - objects = NodeCommissionResultManager() - - node = ForeignKey( - 'maasserver.Node', null=False, editable=False, unique=False) - script_result = IntegerField(editable=False) - name = CharField(max_length=255, unique=False, editable=False) - data = BinaryField( - max_length=1024 * 1024, editable=True, blank=True, default=b'', - null=False) - - def __unicode__(self): - return "%s/%s" % (self.node.system_id, self.name) - - def get_data_as_html(self): - """More-or-less human-readable HTML representation of the output.""" - return escape(self.data.decode('utf-8', 'replace')) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/models/noderesult.py maas-1.7.6+bzr3376/src/metadataserver/models/noderesult.py --- maas-1.5.4+bzr2294/src/metadataserver/models/noderesult.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/models/noderesult.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,135 @@ +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +""":class:`NodeResult` model.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'NodeResult', + ] + + +from django.db.models import ( + CharField, + ForeignKey, + IntegerField, + Manager, + ) +from django.shortcuts import get_object_or_404 +from django.utils.html import escape +from maasserver.models.cleansave import CleanSave +from maasserver.models.timestampedmodel import TimestampedModel +from maasserver.utils.converters import XMLToYAML +from metadataserver import DefaultMeta +from metadataserver.enum import ( + RESULT_TYPE, + RESULT_TYPE_CHOICES, + ) +from metadataserver.fields import BinaryField + + +class NodeResultManager(Manager): + """Utility to manage a collection of :class:`NodeResult`s.""" + + def clear_results(self, node): + """Remove all existing results for a node.""" + self.filter(node=node).delete() + + def store_data(self, node, name, script_result, result_type, data): + """Store data about a node. + + :param node: The node that this result pertains to. + :type node: :class:`maasserver.models.Node` + + :param name: The name of this result, typically the name of + the commissioning script that generated it. + :type name: string + + :param script_result: The exit code of the commissioning + script. + :type script_result: int + + :param result_type: The enum value for either commissioning (0) + or installing (1). + :type script_result: int + + :param data: The raw binary output of the commissioning + script. 
+ :type data: :class:`metadataserver.fields.Bin` + + """ + existing, created = self.get_or_create( + node=node, name=name, + defaults=dict( + script_result=script_result, result_type=result_type, + data=data)) + if not created: + existing.script_result = script_result + existing.result_type = result_type + existing.data = data + existing.save() + return existing + + def get_data(self, node, name): + """Get data about a node.""" + ncr = get_object_or_404(NodeResult, node=node, name=name) + return ncr.data + + +class NodeResult(CleanSave, TimestampedModel): + """Storage for data returned from node commissioning/installation. + + Commissioning/Installing a node results in various bits of data that + need to be stored, such as lshw output. This model allows storing of + this data as unicode text, with an arbitrary name, for later retrieval. + + :ivar node: The context :class:`Node`. + :ivar script_result: If this data results from the execution of a script, + this is the status of this execution. This can be "OK", "FAILED" or + "WORKING" for progress reports. + :ivar result_type: This can be either commissioning or installation. + :ivar name: A unique name to use for the data being stored. + :ivar data: The file's actual data, unicode only. 
+ """ + + class Meta(DefaultMeta): + unique_together = ('node', 'name') + + objects = NodeResultManager() + + node = ForeignKey( + 'maasserver.Node', null=False, editable=False, unique=False) + script_result = IntegerField(editable=False) + result_type = IntegerField( + choices=RESULT_TYPE_CHOICES, editable=False, + default=RESULT_TYPE.COMMISSIONING) + name = CharField(max_length=255, unique=False, editable=False) + data = BinaryField( + max_length=1024 * 1024, editable=True, blank=True, default=b'', + null=False) + + def __unicode__(self): + return "%s/%s" % (self.node.system_id, self.name) + + def get_data_as_html(self): + """More-or-less human-readable HTML representation of the output.""" + return escape(self.data.decode('utf-8', 'replace')) + + def get_data_as_yaml_html(self): + """More-or-less human-readable Yaml HTML representation + of the output. + """ + from metadataserver.models.commissioningscript import ( + LLDP_OUTPUT_NAME, + LSHW_OUTPUT_NAME, + ) + if self.name in (LLDP_OUTPUT_NAME, LSHW_OUTPUT_NAME): + return escape(XMLToYAML(self.data).convert()) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/models/nodeuserdata.py maas-1.7.6+bzr3376/src/metadataserver/models/nodeuserdata.py --- maas-1.5.4+bzr2294/src/metadataserver/models/nodeuserdata.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/models/nodeuserdata.py 2015-07-10 01:27:14.000000000 +0000 @@ -64,6 +64,18 @@ """Remove metadata from node, if it has any any.""" self.filter(node=node).delete() + def bulk_set_user_data(self, nodes, data): + """Set the user data for the given nodes in bulk. + + This is more efficient than calling `set_user_data` on each node. + """ + self.filter(node__in=nodes).delete() + if data is not None: + self.bulk_create(( + self.model(node=node, data=Bin(data)) + for node in nodes + )) + class NodeUserData(CleanSave, Model): """User-data portion of a node's metadata. 
diff -Nru maas-1.5.4+bzr2294/src/metadataserver/models/tests/test_commissioningscript.py maas-1.7.6+bzr3376/src/metadataserver/models/tests/test_commissioningscript.py --- maas-1.5.4+bzr2294/src/metadataserver/models/tests/test_commissioningscript.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/models/tests/test_commissioningscript.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,603 +0,0 @@ -# Copyright 2012-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Test custom commissioning scripts.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import doctest -from inspect import getsource -from io import BytesIO -from math import ( - ceil, - floor, - ) -import os.path -from random import randint -import subprocess -from subprocess import ( - CalledProcessError, - check_output, - STDOUT, - ) -import tarfile -from textwrap import dedent -import time - -from fixtures import FakeLogger -from maasserver.fields import MAC -from maasserver.models.tag import Tag -from maasserver.testing import reload_object -from maasserver.testing.factory import factory -from maasserver.testing.testcase import ( - MAASServerTestCase, - TestWithoutCrochetMixin, - ) -from maastesting.matchers import MockCalledOnceWith -from maastesting.utils import sample_binary_data -from metadataserver.fields import Bin -from metadataserver.models import ( - CommissioningScript, - commissioningscript as cs_module, - ) -from metadataserver.models.commissioningscript import ( - ARCHIVE_PREFIX, - extract_router_mac_addresses, - inject_lldp_result, - inject_lshw_result, - inject_result, - LLDP_OUTPUT_NAME, - LSHW_OUTPUT_NAME, - make_function_call_script, - set_node_routers, - set_virtual_tag, - update_hardware_details, - ) -from metadataserver.models.nodecommissionresult import NodeCommissionResult -from 
mock import ( - call, - create_autospec, - Mock, - sentinel, - ) -from testtools.content import text_content -from testtools.matchers import ( - ContainsAll, - DocTestMatches, - MatchesStructure, - ) - - -def open_tarfile(content): - """Open tar file from raw binary data.""" - return tarfile.open(fileobj=BytesIO(content)) - - -def make_script_name(base_name=None, number=None): - """Make up a name for a commissioning script.""" - if base_name is None: - base_name = 'script' - if number is None: - number = randint(0, 99) - return factory.make_name( - '%0.2d-%s' % (number, factory.make_name(base_name))) - - -class TestCommissioningScriptManager(MAASServerTestCase): - - def test_get_archive_wraps_scripts_in_tar(self): - script = factory.make_commissioning_script() - path = os.path.join(ARCHIVE_PREFIX, script.name) - archive = open_tarfile(CommissioningScript.objects.get_archive()) - self.assertTrue(archive.getmember(path).isfile()) - self.assertEqual(script.content, archive.extractfile(path).read()) - - def test_get_archive_wraps_all_scripts(self): - scripts = {factory.make_commissioning_script() for counter in range(3)} - archive = open_tarfile(CommissioningScript.objects.get_archive()) - self.assertThat( - archive.getnames(), - ContainsAll({ - os.path.join(ARCHIVE_PREFIX, script.name) - for script in scripts - })) - - def test_get_archive_supports_binary_scripts(self): - script = factory.make_commissioning_script(content=sample_binary_data) - path = os.path.join(ARCHIVE_PREFIX, script.name) - archive = open_tarfile(CommissioningScript.objects.get_archive()) - self.assertEqual(script.content, archive.extractfile(path).read()) - - def test_get_archive_includes_builtin_scripts(self): - name = factory.make_name('00-maas') - path = os.path.join(ARCHIVE_PREFIX, name) - content = factory.getRandomString().encode('ascii') - data = dict(name=name, content=content, hook='hook') - self.patch(cs_module, 'BUILTIN_COMMISSIONING_SCRIPTS', {name: data}) - archive = 
open_tarfile(CommissioningScript.objects.get_archive()) - self.assertIn(path, archive.getnames()) - self.assertEqual(content, archive.extractfile(path).read()) - - def test_get_archive_sets_sensible_mode(self): - for counter in range(3): - factory.make_commissioning_script() - archive = open_tarfile(CommissioningScript.objects.get_archive()) - self.assertEqual({0755}, {info.mode for info in archive.getmembers()}) - - def test_get_archive_initializes_file_timestamps(self): - # The mtime on a file inside the tarball is reasonable. - # It would otherwise default to the Epoch, and GNU tar warns - # annoyingly about improbably old files. - start_time = floor(time.time()) - script = factory.make_commissioning_script() - path = os.path.join(ARCHIVE_PREFIX, script.name) - archive = open_tarfile(CommissioningScript.objects.get_archive()) - timestamp = archive.getmember(path).mtime - end_time = ceil(time.time()) - self.assertGreaterEqual(timestamp, start_time) - self.assertLessEqual(timestamp, end_time) - - -class TestCommissioningScript(MAASServerTestCase): - - def test_scripts_may_be_binary(self): - name = make_script_name() - CommissioningScript.objects.create( - name=name, content=Bin(sample_binary_data)) - stored_script = CommissioningScript.objects.get(name=name) - self.assertEqual(sample_binary_data, stored_script.content) - - -class TestMakeFunctionCallScript(MAASServerTestCase): - - def run_script(self, script): - script_filename = self.make_file("test.py", script) - os.chmod(script_filename, 0700) - try: - return check_output((script_filename,), stderr=STDOUT) - except CalledProcessError as error: - self.addDetail("output", text_content(error.output)) - raise - - def test_basic(self): - def example_function(): - print("Hello, World!", end="") - script = make_function_call_script(example_function) - self.assertEqual(b"Hello, World!", self.run_script(script)) - - def test_positional_args_get_passed_through(self): - def example_function(a, b): - print("a=%s, b=%d" % 
(a, b), end="") - script = make_function_call_script(example_function, "foo", 12345) - self.assertEqual(b"a=foo, b=12345", self.run_script(script)) - - def test_keyword_args_get_passed_through(self): - def example_function(a, b): - print("a=%s, b=%d" % (a, b), end="") - script = make_function_call_script(example_function, a="foo", b=12345) - self.assertEqual(b"a=foo, b=12345", self.run_script(script)) - - def test_positional_and_keyword_args_get_passed_through(self): - def example_function(a, b): - print("a=%s, b=%d" % (a, b), end="") - script = make_function_call_script(example_function, "foo", b=12345) - self.assertEqual(b"a=foo, b=12345", self.run_script(script)) - - def test_non_ascii_positional_args_are_passed_without_corruption(self): - def example_function(text): - print(repr(text), end="") - script = make_function_call_script(example_function, "abc\u1234") - self.assertEqual(b"u'abc\\u1234'", self.run_script(script)) - - def test_non_ascii_keyword_args_are_passed_without_corruption(self): - def example_function(text): - print(repr(text), end="") - script = make_function_call_script(example_function, text="abc\u1234") - self.assertEqual(b"u'abc\\u1234'", self.run_script(script)) - - def test_structured_arguments_are_passed_though_too(self): - # Anything that can be JSON serialized can be passed. 
- def example_function(arg): - if arg == {"123": "foo", "bar": [4, 5, 6]}: - print("Equal") - else: - print("Unequal, got %s" % repr(arg)) - script = make_function_call_script( - example_function, {"123": "foo", "bar": [4, 5, 6]}) - self.assertEqual(b"Equal\n", self.run_script(script)) - - -def isolate_function(function): - """Recompile the given function in an empty namespace.""" - source = dedent(getsource(function)) - modcode = compile(source, "lldpd.py", "exec") - namespace = {} - exec(modcode, namespace) - return namespace[function.__name__] - - -class TestLLDPScripts(TestWithoutCrochetMixin, MAASServerTestCase): - - def test_install_script_installs_configures_and_restarts(self): - config_file = self.make_file("config", "# ...") - check_call = self.patch(subprocess, "check_call") - lldpd_install = isolate_function(cs_module.lldpd_install) - lldpd_install(config_file) - # lldpd is installed and restarted. - self.assertEqual( - check_call.call_args_list, - [ - call(("apt-get", "install", "--yes", "lldpd")), - call(("initctl", "reload-configuration")), - call(("service", "lldpd", "restart")) - ]) - # lldpd's config was updated to include an updated DAEMON_ARGS - # setting. Note that the new comment is on a new line, and - # does not interfere with existing config. - config_expected = dedent("""\ - # ... - # Configured by MAAS: - DAEMON_ARGS="-c -f -s -e -r" - """).encode("ascii") - with open(config_file, "rb") as fd: - config_observed = fd.read() - self.assertEqual(config_expected, config_observed) - - def test_wait_script_waits_for_lldpd(self): - reference_file = self.make_file("reference") - time_delay = 8.98 # seconds - lldpd_wait = isolate_function(cs_module.lldpd_wait) - # Do the patching as late as possible, because the setup may call - # one of the patched functions somewhere in the plumbing. We've had - # spurious test failures over this: bug 1283918. 
- self.patch(os.path, "getmtime").return_value = 10.65 - self.patch(time, "time").return_value = 14.12 - self.patch(time, "sleep") - - lldpd_wait(reference_file, time_delay) - - # lldpd_wait checks the mtime of the reference file, - self.assertThat(os.path.getmtime, MockCalledOnceWith(reference_file)) - # and gets the current time, - self.assertThat(time.time, MockCalledOnceWith()) - # then sleeps until time_delay seconds has passed since the - # mtime of the reference file. - self.assertThat(time.sleep, MockCalledOnceWith( - os.path.getmtime.return_value + time_delay - - time.time.return_value)) - - def test_capture_calls_lldpdctl(self): - check_call = self.patch(subprocess, "check_call") - lldpd_capture = isolate_function(cs_module.lldpd_capture) - lldpd_capture() - self.assertEqual( - check_call.call_args_list, - [call(("lldpctl", "-f", "xml"))]) - - -lldp_output_template = """ - - -%s - -""" - -lldp_output_interface_template = """ - - - %s - switch-name - HDFD5BG7J - 192.168.9.9 - - - - -""" - - -def make_lldp_output(macs): - """Return an example raw lldp output containing the given MACs.""" - interfaces = '\n'.join( - lldp_output_interface_template % mac - for mac in macs - ) - script = (lldp_output_template % interfaces).encode('utf8') - return bytes(script) - - -class TestExtractRouters(MAASServerTestCase): - - def test_extract_router_mac_addresses_returns_None_when_empty_input(self): - self.assertIsNone(extract_router_mac_addresses('')) - - def test_extract_router_mac_addresses_returns_empty_list(self): - lldp_output = make_lldp_output([]) - self.assertItemsEqual([], extract_router_mac_addresses(lldp_output)) - - def test_extract_router_mac_addresses_returns_routers_list(self): - macs = ["11:22:33:44:55:66", "aa:bb:cc:dd:ee:ff"] - lldp_output = make_lldp_output(macs) - routers = extract_router_mac_addresses(lldp_output) - self.assertItemsEqual(macs, routers) - - -class TestSetNodeRouters(MAASServerTestCase): - - def test_set_node_routers_updates_node(self): 
- node = factory.make_node(routers=None) - macs = ["11:22:33:44:55:66", "aa:bb:cc:dd:ee:ff"] - lldp_output = make_lldp_output(macs) - set_node_routers(node, lldp_output, 0) - self.assertItemsEqual( - [MAC(mac) for mac in macs], reload_object(node).routers) - - def test_set_node_routers_updates_node_if_no_routers(self): - node = factory.make_node() - lldp_output = make_lldp_output([]) - set_node_routers(node, lldp_output, 0) - self.assertItemsEqual([], reload_object(node).routers) - - def test_set_node_routers_does_nothing_if_script_failed(self): - node = factory.make_node() - routers_before = node.routers - macs = ["11:22:33:44:55:66", "aa:bb:cc:dd:ee:ff"] - lldp_output = make_lldp_output(macs) - set_node_routers(node, lldp_output, exit_status=1) - routers_after = reload_object(node).routers - self.assertItemsEqual(routers_before, routers_after) - - -class TestInjectResult(MAASServerTestCase): - - def test_inject_result_stores_data(self): - node = factory.make_node() - name = factory.make_name("result") - output = factory.getRandomBytes() - exit_status = next(factory.random_octets) - - inject_result(node, name, output, exit_status) - - self.assertThat( - NodeCommissionResult.objects.get(node=node, name=name), - MatchesStructure.byEquality( - node=node, name=name, script_result=exit_status, - data=output)) - - def test_inject_result_calls_hook(self): - node = factory.make_node() - name = factory.make_name("result") - output = factory.getRandomBytes() - exit_status = next(factory.random_octets) - hook = Mock() - self.patch( - cs_module, "BUILTIN_COMMISSIONING_SCRIPTS", - {name: {"hook": hook}}) - - inject_result(node, name, output, exit_status) - - self.assertThat(hook, MockCalledOnceWith( - node=node, output=output, exit_status=exit_status)) - - def inject_lshw_result(self): - # inject_lshw_result() just calls through to inject_result(). 
- inject_result = self.patch( - cs_module, "inject_result", - create_autospec(cs_module.inject_result)) - inject_lshw_result(sentinel.node, sentinel.output, sentinel.status) - self.assertThat(inject_result, MockCalledOnceWith( - sentinel.node, LSHW_OUTPUT_NAME, sentinel.output, sentinel.status)) - - def inject_lldp_result(self): - # inject_lldp_result() just calls through to inject_result(). - inject_result = self.patch( - cs_module, "inject_result", - create_autospec(cs_module.inject_result)) - inject_lldp_result(sentinel.node, sentinel.output, sentinel.status) - self.assertThat(inject_result, MockCalledOnceWith( - sentinel.node, LLDP_OUTPUT_NAME, sentinel.output, sentinel.status)) - - -class TestSetVirtualTag(MAASServerTestCase): - - def getVirtualTag(self): - virtual_tag, _ = Tag.objects.get_or_create(name='virtual') - return virtual_tag - - def assertTagsEqual(self, node, tags): - self.assertItemsEqual( - tags, [tag.name for tag in node.tags.all()]) - - def test_sets_virtual_tag(self): - node = factory.make_node() - self.assertTagsEqual(node, []) - set_virtual_tag(node, b"virtual", 0) - self.assertTagsEqual(node, ["virtual"]) - - def test_removes_virtual_tag(self): - node = factory.make_node() - node.tags.add(self.getVirtualTag()) - self.assertTagsEqual(node, ["virtual"]) - set_virtual_tag(node, b"notvirtual", 0) - self.assertTagsEqual(node, []) - - def test_output_not_containing_virtual_does_not_set_tag(self): - logger = self.useFixture(FakeLogger()) - node = factory.make_node() - self.assertTagsEqual(node, []) - set_virtual_tag(node, b"wibble", 0) - self.assertTagsEqual(node, []) - self.assertEqual( - "Neither 'virtual' nor 'notvirtual' appeared in the captured " - "VIRTUALITY_SCRIPT output for node %s.\n" % node.system_id, - logger.output) - - def test_output_not_containing_virtual_does_not_remove_tag(self): - logger = self.useFixture(FakeLogger()) - node = factory.make_node() - node.tags.add(self.getVirtualTag()) - self.assertTagsEqual(node, ["virtual"]) - 
set_virtual_tag(node, b"wibble", 0) - self.assertTagsEqual(node, ["virtual"]) - self.assertEqual( - "Neither 'virtual' nor 'notvirtual' appeared in the captured " - "VIRTUALITY_SCRIPT output for node %s.\n" % node.system_id, - logger.output) - - -class TestUpdateHardwareDetails(MAASServerTestCase): - - doctest_flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE - - def test_hardware_updates_cpu_count(self): - node = factory.make_node() - xmlbytes = dedent("""\ - - - - - """).encode("utf-8") - update_hardware_details(node, xmlbytes, 0) - node = reload_object(node) - self.assertEqual(2, node.cpu_count) - - def test_cpu_count_counts_multi_cores(self): - node = factory.make_node() - xmlbytes = dedent("""\ - - - - - - - - - - - """).encode("utf-8") - update_hardware_details(node, xmlbytes, 0) - node = reload_object(node) - self.assertEqual(5, node.cpu_count) - - def test_cpu_count_skips_disabled_cpus(self): - node = factory.make_node() - xmlbytes = dedent("""\ - - - - - - """).encode("utf-8") - update_hardware_details(node, xmlbytes, 0) - node = reload_object(node) - self.assertEqual(1, node.cpu_count) - - def test_hardware_updates_memory(self): - node = factory.make_node() - xmlbytes = dedent("""\ - - 4294967296 - - """).encode("utf-8") - update_hardware_details(node, xmlbytes, 0) - node = reload_object(node) - self.assertEqual(4096, node.memory) - - def test_hardware_updates_memory_lenovo(self): - node = factory.make_node() - xmlbytes = dedent("""\ - - - - 4294967296 - - - 3221225472 - - - - - 536870912 - - - - - """).encode("utf-8") - update_hardware_details(node, xmlbytes, 0) - node = reload_object(node) - mega = 2 ** 20 - expected = (4294967296 + 3221225472 + 536879812) / mega - self.assertEqual(expected, node.memory) - - def test_hardware_updates_storage(self): - node = factory.make_node() - xmlbytes = dedent("""\ - - Extended partition - 1 - scsi@0:0.0.0,1 - /dev/sda1 - 8:1 - 127033934848 - - """).encode("utf-8") - update_hardware_details(node, xmlbytes, 0) - 
node = reload_object(node) - self.assertEqual(121149, node.storage) - - def test_hardware_updates_storage_1279728(self): - # Hardware data from bug 1279728. - node = factory.make_node() - xmlbytes = dedent("""\ - - EXT4 volume - Linux - 1 - scsi@0:0.0.0,1 - /dev/sda1 - 801568677888 - - """).encode("utf-8") - update_hardware_details(node, xmlbytes, 0) - node = reload_object(node) - self.assertEqual(764435, node.storage) - - def test_hardware_updates_ignores_empty_tags(self): - # Tags with empty definitions are ignored when - # update_hardware_details gets called. - factory.make_tag(definition='') - node = factory.make_node() - node.save() - xmlbytes = ''.encode("utf-8") - update_hardware_details(node, xmlbytes, 0) - node = reload_object(node) - # The real test is that update_hardware_details does not blow - # up, see bug 1131418. - self.assertEqual([], list(node.tags.all())) - - def test_hardware_updates_logs_invalid_xml(self): - logger = self.useFixture(FakeLogger()) - update_hardware_details(factory.make_node(), b"garbage", 0) - expected_log = dedent("""\ - Invalid lshw data. - Traceback (most recent call last): - ... - XMLSyntaxError: Start tag expected, '<' not found, line 1, column 1 - """) - self.assertThat( - logger.output, DocTestMatches( - expected_log, self.doctest_flags)) - - def test_hardware_updates_does_nothing_when_exit_status_is_not_zero(self): - logger = self.useFixture(FakeLogger()) - update_hardware_details(factory.make_node(), b"garbage", exit_status=1) - self.assertEqual("", logger.output) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/models/tests/test_nodecommissionresult.py maas-1.7.6+bzr3376/src/metadataserver/models/tests/test_nodecommissionresult.py --- maas-1.5.4+bzr2294/src/metadataserver/models/tests/test_nodecommissionresult.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/models/tests/test_nodecommissionresult.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,7 +1,7 @@ # Copyright 2012-2014 Canonical Ltd. 
This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). -"""Tests for the :class:`NodeCommissionResult` model.""" +"""Tests for the :class:`NodeResult` model.""" from __future__ import ( absolute_import, @@ -19,54 +19,73 @@ from django.core.exceptions import ValidationError from django.http import Http404 from maasserver.testing.factory import factory +from maasserver.utils.converters import XMLToYAML from maastesting.djangotestcase import DjangoTestCase +from metadataserver.enum import RESULT_TYPE from metadataserver.fields import Bin -from metadataserver.models import NodeCommissionResult +from metadataserver.models import NodeResult +from metadataserver.models.commissioningscript import ( + LLDP_OUTPUT_NAME, + LSHW_OUTPUT_NAME, + ) -class TestNodeCommissionResult(DjangoTestCase): - """Test the NodeCommissionResult model.""" +class TestNodeResult(DjangoTestCase): + """Test the NodeResult model.""" def test_unicode_represents_result(self): - result = factory.make_node_commission_result() + result = factory.make_NodeResult_for_commissioning() self.assertEqual( '%s/%s' % (result.node.system_id, result.name), unicode(result)) def test_can_store_data(self): - node = factory.make_node() - name = factory.getRandomString() - data = factory.getRandomBytes() - factory.make_node_commission_result(node=node, name=name, data=data) + node = factory.make_Node() + name = factory.make_string() + data = factory.make_bytes() + factory.make_NodeResult_for_commissioning( + node=node, name=name, data=data) - ncr = NodeCommissionResult.objects.get(name=name) + ncr = NodeResult.objects.get(name=name) self.assertAttributes(ncr, dict(node=node, data=data)) def test_node_name_uniqueness(self): # You cannot have two result rows with the same name for the # same node. 
- node = factory.make_node() - factory.make_node_commission_result(node=node, name="foo") + node = factory.make_Node() + factory.make_NodeResult_for_commissioning(node=node, name="foo") self.assertRaises( ValidationError, - factory.make_node_commission_result, node=node, name="foo") + factory.make_NodeResult_for_commissioning, node=node, name="foo") def test_different_nodes_can_have_same_data_name(self): - node = factory.make_node() - ncr1 = factory.make_node_commission_result(node=node, name="foo") - node2 = factory.make_node() - ncr2 = factory.make_node_commission_result(node=node2, name="foo") + node = factory.make_Node() + ncr1 = factory.make_NodeResult_for_commissioning( + node=node, name="foo") + node2 = factory.make_Node() + ncr2 = factory.make_NodeResult_for_commissioning( + node=node2, name="foo") self.assertEqual(ncr1.name, ncr2.name) def test_get_data_as_html_returns_output(self): - output = factory.getRandomString() - result = factory.make_node_commission_result( + output = factory.make_string() + result = factory.make_NodeResult_for_commissioning( data=output.encode('ascii')) self.assertEqual(output, result.get_data_as_html()) + def test_get_data_as_yaml_html_returns_output(self): + data = "bar".encode("utf-8") + expected = XMLToYAML(data).convert() + lshw_result = factory.make_NodeResult_for_commissioning( + name=LSHW_OUTPUT_NAME, script_result=0, data=data) + lldp_result = factory.make_NodeResult_for_commissioning( + name=LLDP_OUTPUT_NAME, script_result=0, data=data) + self.assertEqual(expected, lshw_result.get_data_as_yaml_html()) + self.assertEqual(expected, lldp_result.get_data_as_yaml_html()) + def test_get_data_as_html_escapes_binary(self): output = b'\x00\xff' - result = factory.make_node_commission_result(data=output) + result = factory.make_NodeResult_for_commissioning(data=output) html = result.get_data_as_html() self.assertIsInstance(html, unicode) # The nul byte turns into the zero character. 
The 0xff is an invalid @@ -75,71 +94,73 @@ def test_get_data_as_html_escapes_for_html(self): output = '<&>' - result = factory.make_node_commission_result( + result = factory.make_NodeResult_for_commissioning( data=output.encode('ascii')) self.assertEqual('<&>', result.get_data_as_html()) -class TestNodeCommissionResultManager(DjangoTestCase): - """Test the manager utility for NodeCommissionResult.""" +class TestNodeResultManager(DjangoTestCase): + """Test the manager utility for NodeResult.""" def test_clear_results_removes_rows(self): # clear_results should remove all a node's results. - node = factory.make_node() - factory.make_node_commission_result(node=node) - factory.make_node_commission_result(node=node) - factory.make_node_commission_result(node=node) + node = factory.make_Node() + factory.make_NodeResult_for_commissioning(node=node) + factory.make_NodeResult_for_commissioning(node=node) + factory.make_NodeResult_for_commissioning(node=node) - NodeCommissionResult.objects.clear_results(node) + NodeResult.objects.clear_results(node) self.assertItemsEqual( [], - NodeCommissionResult.objects.filter(node=node)) + NodeResult.objects.filter(node=node)) def test_clear_results_ignores_other_nodes(self): # clear_results should only remove results for the supplied # node. 
- node1 = factory.make_node() - factory.make_node_commission_result(node=node1) - node2 = factory.make_node() - factory.make_node_commission_result(node=node2) + node1 = factory.make_Node() + factory.make_NodeResult_for_commissioning(node=node1) + node2 = factory.make_Node() + factory.make_NodeResult_for_commissioning(node=node2) - NodeCommissionResult.objects.clear_results(node1) + NodeResult.objects.clear_results(node1) self.assertTrue( - NodeCommissionResult.objects.filter(node=node2).exists()) + NodeResult.objects.filter(node=node2).exists()) def test_store_data(self): - node = factory.make_node() - name = factory.getRandomString(255) - data = factory.getRandomBytes(1024 * 1024) + node = factory.make_Node() + name = factory.make_string(255) + data = factory.make_bytes(1024 * 1024) script_result = randint(0, 10) - result = NodeCommissionResult.objects.store_data( - node, name=name, script_result=script_result, data=Bin(data)) - result_in_db = NodeCommissionResult.objects.get(node=node) + result = NodeResult.objects.store_data( + node, name=name, script_result=script_result, + result_type=RESULT_TYPE.COMMISSIONING, data=Bin(data)) + result_in_db = NodeResult.objects.get(node=node) self.assertAttributes(result_in_db, dict(name=name, data=data)) # store_data() returns the model object. 
self.assertEqual(result, result_in_db) def test_store_data_updates_existing(self): - node = factory.make_node() - name = factory.getRandomString(255) + node = factory.make_Node() + name = factory.make_string(255) script_result = randint(0, 10) - factory.make_node_commission_result(node=node, name=name) - data = factory.getRandomBytes(1024 * 1024) - NodeCommissionResult.objects.store_data( - node, name=name, script_result=script_result, data=Bin(data)) + factory.make_NodeResult_for_commissioning(node=node, name=name) + data = factory.make_bytes(1024 * 1024) + NodeResult.objects.store_data( + node, name=name, script_result=script_result, + result_type=RESULT_TYPE.COMMISSIONING, data=Bin(data)) self.assertAttributes( - NodeCommissionResult.objects.get(node=node), + NodeResult.objects.get(node=node), dict(name=name, data=data, script_result=script_result)) def test_get_data(self): - ncr = factory.make_node_commission_result() - result = NodeCommissionResult.objects.get_data(ncr.node, ncr.name) + ncr = factory.make_NodeResult_for_commissioning() + result = NodeResult.objects.get_data(ncr.node, ncr.name) self.assertEqual(ncr.data, result) def test_get_data_404s_when_not_found(self): - ncr = factory.make_node_commission_result() + ncr = factory.make_NodeResult_for_commissioning() self.assertRaises( Http404, - NodeCommissionResult.objects.get_data, ncr.node, "bad name") + NodeResult.objects.get_data, ncr.node, "bad name") diff -Nru maas-1.5.4+bzr2294/src/metadataserver/models/tests/test_nodekey.py maas-1.7.6+bzr3376/src/metadataserver/models/tests/test_nodekey.py --- maas-1.5.4+bzr2294/src/metadataserver/models/tests/test_nodekey.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/models/tests/test_nodekey.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. 
This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for :class:`NodeKey` model and manager.""" @@ -23,42 +23,42 @@ """Test NodeKeyManager.""" def test_get_token_for_node_registers_node_key(self): - node = factory.make_node() + node = factory.make_Node() token = NodeKey.objects.get_token_for_node(node) nodekey = NodeKey.objects.get(node=node, key=token.key) self.assertNotEqual(None, nodekey) self.assertEqual(token, nodekey.token) def test_get_node_for_key_finds_node(self): - node = factory.make_node() + node = factory.make_Node() token = NodeKey.objects.get_token_for_node(node) self.assertEqual(node, NodeKey.objects.get_node_for_key(token.key)) def test_get_node_for_key_raises_DoesNotExist_if_key_not_found(self): - non_key = factory.getRandomString() + non_key = factory.make_string() self.assertRaises( NodeKey.DoesNotExist, NodeKey.objects.get_node_for_key, non_key) def test_get_token_for_node_creates_token(self): - node = factory.make_node() + node = factory.make_Node() token = NodeKey.objects.get_token_for_node(node) self.assertEqual(node, NodeKey.objects.get_node_for_key(token.key)) def test_get_token_for_node_returns_existing_token(self): - node = factory.make_node() + node = factory.make_Node() original_token = NodeKey.objects.get_token_for_node(node) repeated_token = NodeKey.objects.get_token_for_node(node) self.assertEqual(original_token, repeated_token) def test_get_token_for_node_inverts_get_node_for_key(self): - node = factory.make_node() + node = factory.make_Node() self.assertEqual( node, NodeKey.objects.get_node_for_key( NodeKey.objects.get_token_for_node(node).key)) def test_get_node_for_key_inverts_get_token_for_node(self): - key = NodeKey.objects.get_token_for_node(factory.make_node()).key + key = NodeKey.objects.get_token_for_node(factory.make_Node()).key self.assertEqual( key, NodeKey.objects.get_token_for_node( diff -Nru 
maas-1.5.4+bzr2294/src/metadataserver/models/tests/test_noderesults.py maas-1.7.6+bzr3376/src/metadataserver/models/tests/test_noderesults.py --- maas-1.5.4+bzr2294/src/metadataserver/models/tests/test_noderesults.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/models/tests/test_noderesults.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,705 @@ +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Test custom commissioning scripts.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import doctest +from inspect import getsource +from io import BytesIO +from math import ( + ceil, + floor, + ) +import os.path +from random import randint +import subprocess +from subprocess import ( + CalledProcessError, + check_output, + STDOUT, + ) +import tarfile +from textwrap import dedent +import time + +from fixtures import FakeLogger +from maasserver.fields import MAC +from maasserver.models import Config +from maasserver.models.tag import Tag +from maasserver.testing.factory import factory +from maasserver.testing.orm import reload_object +from maasserver.testing.testcase import ( + MAASServerTestCase, + TestWithoutCrochetMixin, + ) +from maastesting.matchers import ( + MockCalledOnceWith, + MockCallsMatch, + ) +from maastesting.utils import sample_binary_data +from metadataserver.enum import RESULT_TYPE +from metadataserver.fields import Bin +from metadataserver.models import ( + CommissioningScript, + commissioningscript as cs_module, + ) +from metadataserver.models.commissioningscript import ( + ARCHIVE_PREFIX, + BUILTIN_COMMISSIONING_SCRIPTS, + DHCP_UNCONFIGURED_INTERFACES_NAME, + extract_router_mac_addresses, + get_builtin_commissioning_scripts, + inject_lldp_result, + inject_lshw_result, + inject_result, + LLDP_OUTPUT_NAME, + LSHW_OUTPUT_NAME, + 
make_function_call_script, + set_node_routers, + set_virtual_tag, + update_hardware_details, + ) +from metadataserver.models.noderesult import NodeResult +from mock import ( + call, + create_autospec, + Mock, + sentinel, + ) +from testtools.content import text_content +from testtools.matchers import ( + ContainsAll, + DocTestMatches, + MatchesStructure, + ) + + +def open_tarfile(content): + """Open tar file from raw binary data.""" + return tarfile.open(fileobj=BytesIO(content)) + + +def make_script_name(base_name=None, number=None): + """Make up a name for a commissioning script.""" + if base_name is None: + base_name = 'script' + if number is None: + number = randint(0, 99) + return factory.make_name( + '%0.2d-%s' % (number, factory.make_name(base_name))) + + +class TestCommissioningScriptManager(MAASServerTestCase): + + def test_get_archive_wraps_scripts_in_tar(self): + script = factory.make_CommissioningScript() + path = os.path.join(ARCHIVE_PREFIX, script.name) + archive = open_tarfile(CommissioningScript.objects.get_archive()) + self.assertTrue(archive.getmember(path).isfile()) + self.assertEqual(script.content, archive.extractfile(path).read()) + + def test_get_archive_wraps_all_scripts(self): + scripts = {factory.make_CommissioningScript() for counter in range(3)} + archive = open_tarfile(CommissioningScript.objects.get_archive()) + self.assertThat( + archive.getnames(), + ContainsAll({ + os.path.join(ARCHIVE_PREFIX, script.name) + for script in scripts + })) + + def test_get_archive_supports_binary_scripts(self): + script = factory.make_CommissioningScript(content=sample_binary_data) + path = os.path.join(ARCHIVE_PREFIX, script.name) + archive = open_tarfile(CommissioningScript.objects.get_archive()) + self.assertEqual(script.content, archive.extractfile(path).read()) + + def test_get_archive_includes_builtin_scripts(self): + name = factory.make_name('00-maas') + path = os.path.join(ARCHIVE_PREFIX, name) + content = factory.make_string().encode('ascii') + 
data = dict(name=name, content=content, hook='hook') + self.patch(cs_module, 'BUILTIN_COMMISSIONING_SCRIPTS', {name: data}) + archive = open_tarfile(CommissioningScript.objects.get_archive()) + self.assertIn(path, archive.getnames()) + self.assertEqual(content, archive.extractfile(path).read()) + + def test_get_archive_sets_sensible_mode(self): + for counter in range(3): + factory.make_CommissioningScript() + archive = open_tarfile(CommissioningScript.objects.get_archive()) + self.assertEqual({0755}, {info.mode for info in archive.getmembers()}) + + def test_get_archive_initializes_file_timestamps(self): + # The mtime on a file inside the tarball is reasonable. + # It would otherwise default to the Epoch, and GNU tar warns + # annoyingly about improbably old files. + start_time = floor(time.time()) + script = factory.make_CommissioningScript() + path = os.path.join(ARCHIVE_PREFIX, script.name) + archive = open_tarfile(CommissioningScript.objects.get_archive()) + timestamp = archive.getmember(path).mtime + end_time = ceil(time.time()) + self.assertGreaterEqual(timestamp, start_time) + self.assertLessEqual(timestamp, end_time) + + +class TestCommissioningScript(MAASServerTestCase): + + def test_scripts_may_be_binary(self): + name = make_script_name() + CommissioningScript.objects.create( + name=name, content=Bin(sample_binary_data)) + stored_script = CommissioningScript.objects.get(name=name) + self.assertEqual(sample_binary_data, stored_script.content) + + +class TestMakeFunctionCallScript(MAASServerTestCase): + + def run_script(self, script): + script_filename = self.make_file("test.py", script) + os.chmod(script_filename, 0700) + try: + return check_output((script_filename,), stderr=STDOUT) + except CalledProcessError as error: + self.addDetail("output", text_content(error.output)) + raise + + def test_basic(self): + def example_function(): + print("Hello, World!", end="") + script = make_function_call_script(example_function) + self.assertEqual(b"Hello, World!", 
self.run_script(script)) + + def test_positional_args_get_passed_through(self): + def example_function(a, b): + print("a=%s, b=%d" % (a, b), end="") + script = make_function_call_script(example_function, "foo", 12345) + self.assertEqual(b"a=foo, b=12345", self.run_script(script)) + + def test_keyword_args_get_passed_through(self): + def example_function(a, b): + print("a=%s, b=%d" % (a, b), end="") + script = make_function_call_script(example_function, a="foo", b=12345) + self.assertEqual(b"a=foo, b=12345", self.run_script(script)) + + def test_positional_and_keyword_args_get_passed_through(self): + def example_function(a, b): + print("a=%s, b=%d" % (a, b), end="") + script = make_function_call_script(example_function, "foo", b=12345) + self.assertEqual(b"a=foo, b=12345", self.run_script(script)) + + def test_non_ascii_positional_args_are_passed_without_corruption(self): + def example_function(text): + print(repr(text), end="") + script = make_function_call_script(example_function, "abc\u1234") + self.assertEqual(b"u'abc\\u1234'", self.run_script(script)) + + def test_non_ascii_keyword_args_are_passed_without_corruption(self): + def example_function(text): + print(repr(text), end="") + script = make_function_call_script(example_function, text="abc\u1234") + self.assertEqual(b"u'abc\\u1234'", self.run_script(script)) + + def test_structured_arguments_are_passed_though_too(self): + # Anything that can be JSON serialized can be passed. 
+ def example_function(arg): + if arg == {"123": "foo", "bar": [4, 5, 6]}: + print("Equal") + else: + print("Unequal, got %s" % repr(arg)) + script = make_function_call_script( + example_function, {"123": "foo", "bar": [4, 5, 6]}) + self.assertEqual(b"Equal\n", self.run_script(script)) + + +def isolate_function(function): + """Recompile the given function in an empty namespace.""" + source = dedent(getsource(function)) + modcode = compile(source, "lldpd.py", "exec") + namespace = {} + exec(modcode, namespace) + return namespace[function.__name__] + + +class TestLLDPScripts(TestWithoutCrochetMixin, MAASServerTestCase): + + def test_install_script_installs_configures_and_restarts(self): + config_file = self.make_file("config", "# ...") + check_call = self.patch(subprocess, "check_call") + lldpd_install = isolate_function(cs_module.lldpd_install) + lldpd_install(config_file) + # lldpd is installed and restarted. + self.assertEqual( + check_call.call_args_list, + [ + call(("apt-get", "install", "--yes", "lldpd")), + call(("initctl", "reload-configuration")), + call(("service", "lldpd", "restart")) + ]) + # lldpd's config was updated to include an updated DAEMON_ARGS + # setting. Note that the new comment is on a new line, and + # does not interfere with existing config. + config_expected = dedent("""\ + # ... + # Configured by MAAS: + DAEMON_ARGS="-c -f -s -e -r" + """).encode("ascii") + with open(config_file, "rb") as fd: + config_observed = fd.read() + self.assertEqual(config_expected, config_observed) + + def test_wait_script_waits_for_lldpd(self): + reference_file = self.make_file("reference") + time_delay = 8.98 # seconds + lldpd_wait = isolate_function(cs_module.lldpd_wait) + # Do the patching as late as possible, because the setup may call + # one of the patched functions somewhere in the plumbing. We've had + # spurious test failures over this: bug 1283918. 
+ self.patch(os.path, "getmtime").return_value = 10.65 + self.patch(time, "time").return_value = 14.12 + self.patch(time, "sleep") + + lldpd_wait(reference_file, time_delay) + + # lldpd_wait checks the mtime of the reference file, + self.assertThat(os.path.getmtime, MockCalledOnceWith(reference_file)) + # and gets the current time, + self.assertThat(time.time, MockCalledOnceWith()) + # then sleeps until time_delay seconds has passed since the + # mtime of the reference file. + self.assertThat(time.sleep, MockCalledOnceWith( + os.path.getmtime.return_value + time_delay - + time.time.return_value)) + + def test_capture_calls_lldpdctl(self): + check_call = self.patch(subprocess, "check_call") + lldpd_capture = isolate_function(cs_module.lldpd_capture) + lldpd_capture() + self.assertEqual( + check_call.call_args_list, + [call(("lldpctl", "-f", "xml"))]) + + +lldp_output_template = """ + + +%s + +""" + +lldp_output_interface_template = """ + + + %s + switch-name + HDFD5BG7J + 192.168.9.9 + + + + +""" + + +def make_lldp_output(macs): + """Return an example raw lldp output containing the given MACs.""" + interfaces = '\n'.join( + lldp_output_interface_template % mac + for mac in macs + ) + script = (lldp_output_template % interfaces).encode('utf8') + return bytes(script) + + +# The two following example outputs differ because eth2 and eth1 are not +# configured and thus 'ifconfig -s -a' returns a list with both 'eth1' +# and 'eth2' while 'ifconfig -s' does not contain them. 
+ +# Example output of 'ifconfig -s -a': +ifconfig_all = """ +Iface MTU Met RX-OK RX-ERR RX-DRP RX-OVR TX-OK TX-ERR TX-DRP +eth2 1500 0 0 0 0 0 0 0 +eth1 1500 0 0 0 0 0 0 0 +eth0 1500 0 1366127 0 0 0 831110 0 +lo 65536 0 38075 0 0 0 38075 0 +virbr0 1500 0 0 0 0 0 0 0 +wlan0 1500 0 2304695 0 0 0 1436049 0 +""" + +# Example output of 'ifconfig -s': +ifconfig_config = """ +Iface MTU Met RX-OK RX-ERR RX-DRP RX-OVR TX-OK TX-ERR TX-DRP +eth0 1500 0 1366127 0 0 0 831110 0 +lo 65536 0 38115 0 0 0 38115 0 +virbr0 1500 0 0 0 0 0 0 0 +wlan0 1500 0 2304961 0 0 0 1436319 0 +""" + + +class TestDHCPExplore(MAASServerTestCase): + + def test_calls_dhclient_on_unconfigured_interfaces(self): + check_output = self.patch(subprocess, "check_output") + check_output.side_effect = [ifconfig_all, ifconfig_config] + mock_call = self.patch(subprocess, "call") + dhcp_explore = isolate_function(cs_module.dhcp_explore) + dhcp_explore() + self.assertThat( + mock_call, + MockCallsMatch( + call(["dhclient", "-nw", 'eth1']), + call(["dhclient", "-nw", 'eth2']))) + + +class TestExtractRouters(MAASServerTestCase): + + def test_extract_router_mac_addresses_returns_None_when_empty_input(self): + self.assertIsNone(extract_router_mac_addresses('')) + + def test_extract_router_mac_addresses_returns_empty_list(self): + lldp_output = make_lldp_output([]) + self.assertItemsEqual([], extract_router_mac_addresses(lldp_output)) + + def test_extract_router_mac_addresses_returns_routers_list(self): + macs = ["11:22:33:44:55:66", "aa:bb:cc:dd:ee:ff"] + lldp_output = make_lldp_output(macs) + routers = extract_router_mac_addresses(lldp_output) + self.assertItemsEqual(macs, routers) + + +class TestSetNodeRouters(MAASServerTestCase): + + def test_set_node_routers_updates_node(self): + node = factory.make_Node(routers=None) + macs = ["11:22:33:44:55:66", "aa:bb:cc:dd:ee:ff"] + lldp_output = make_lldp_output(macs) + set_node_routers(node, lldp_output, 0) + self.assertItemsEqual( + [MAC(mac) for mac in macs], 
reload_object(node).routers) + + def test_set_node_routers_updates_node_if_no_routers(self): + node = factory.make_Node() + lldp_output = make_lldp_output([]) + set_node_routers(node, lldp_output, 0) + self.assertItemsEqual([], reload_object(node).routers) + + def test_set_node_routers_does_nothing_if_script_failed(self): + node = factory.make_Node() + routers_before = node.routers + macs = ["11:22:33:44:55:66", "aa:bb:cc:dd:ee:ff"] + lldp_output = make_lldp_output(macs) + set_node_routers(node, lldp_output, exit_status=1) + routers_after = reload_object(node).routers + self.assertItemsEqual(routers_before, routers_after) + + +class TestInjectResult(MAASServerTestCase): + + def test_inject_result_stores_data(self): + node = factory.make_Node() + name = factory.make_name("result") + output = factory.make_bytes() + exit_status = next(factory.random_octets) + + inject_result(node, name, output, exit_status) + + self.assertThat( + NodeResult.objects.get(node=node, name=name), + MatchesStructure.byEquality( + node=node, name=name, script_result=exit_status, + result_type=RESULT_TYPE.COMMISSIONING, + data=output)) + + def test_inject_result_calls_hook(self): + node = factory.make_Node() + name = factory.make_name("result") + output = factory.make_bytes() + exit_status = next(factory.random_octets) + hook = Mock() + self.patch( + cs_module, "BUILTIN_COMMISSIONING_SCRIPTS", + {name: {"hook": hook}}) + + inject_result(node, name, output, exit_status) + + self.assertThat(hook, MockCalledOnceWith( + node=node, output=output, exit_status=exit_status)) + + def inject_lshw_result(self): + # inject_lshw_result() just calls through to inject_result(). 
+ inject_result = self.patch( + cs_module, "inject_result", + create_autospec(cs_module.inject_result)) + inject_lshw_result(sentinel.node, sentinel.output, sentinel.status) + self.assertThat(inject_result, MockCalledOnceWith( + sentinel.node, LSHW_OUTPUT_NAME, sentinel.output, sentinel.status)) + + def inject_lldp_result(self): + # inject_lldp_result() just calls through to inject_result(). + inject_result = self.patch( + cs_module, "inject_result", + create_autospec(cs_module.inject_result)) + inject_lldp_result(sentinel.node, sentinel.output, sentinel.status) + self.assertThat(inject_result, MockCalledOnceWith( + sentinel.node, LLDP_OUTPUT_NAME, sentinel.output, sentinel.status)) + + +class TestSetVirtualTag(MAASServerTestCase): + + def getVirtualTag(self): + virtual_tag, _ = Tag.objects.get_or_create(name='virtual') + return virtual_tag + + def assertTagsEqual(self, node, tags): + self.assertItemsEqual( + tags, [tag.name for tag in node.tags.all()]) + + def test_sets_virtual_tag(self): + node = factory.make_Node() + self.assertTagsEqual(node, []) + set_virtual_tag(node, b"virtual", 0) + self.assertTagsEqual(node, ["virtual"]) + + def test_removes_virtual_tag(self): + node = factory.make_Node() + node.tags.add(self.getVirtualTag()) + self.assertTagsEqual(node, ["virtual"]) + set_virtual_tag(node, b"notvirtual", 0) + self.assertTagsEqual(node, []) + + def test_output_not_containing_virtual_does_not_set_tag(self): + logger = self.useFixture(FakeLogger()) + node = factory.make_Node() + self.assertTagsEqual(node, []) + set_virtual_tag(node, b"wibble", 0) + self.assertTagsEqual(node, []) + self.assertIn( + "Neither 'virtual' nor 'notvirtual' appeared in the captured " + "VIRTUALITY_SCRIPT output for node %s.\n" % node.system_id, + logger.output) + + def test_output_not_containing_virtual_does_not_remove_tag(self): + logger = self.useFixture(FakeLogger()) + node = factory.make_Node() + node.tags.add(self.getVirtualTag()) + self.assertTagsEqual(node, ["virtual"]) + 
set_virtual_tag(node, b"wibble", 0) + self.assertTagsEqual(node, ["virtual"]) + self.assertIn( + "Neither 'virtual' nor 'notvirtual' appeared in the captured " + "VIRTUALITY_SCRIPT output for node %s.\n" % node.system_id, + logger.output) + + +class TestUpdateHardwareDetails(MAASServerTestCase): + + doctest_flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE + + def test_hardware_updates_cpu_count(self): + node = factory.make_Node() + xmlbytes = dedent("""\ + + + + + """).encode("utf-8") + update_hardware_details(node, xmlbytes, 0) + node = reload_object(node) + self.assertEqual(2, node.cpu_count) + + def test_cpu_count_counts_multi_cores(self): + node = factory.make_Node() + xmlbytes = dedent("""\ + + + + + + + + + + + """).encode("utf-8") + update_hardware_details(node, xmlbytes, 0) + node = reload_object(node) + self.assertEqual(5, node.cpu_count) + + def test_cpu_count_skips_disabled_cpus(self): + node = factory.make_Node() + xmlbytes = dedent("""\ + + + + + + """).encode("utf-8") + update_hardware_details(node, xmlbytes, 0) + node = reload_object(node) + self.assertEqual(1, node.cpu_count) + + def test_hardware_updates_memory(self): + node = factory.make_Node() + xmlbytes = dedent("""\ + + 4294967296 + + """).encode("utf-8") + update_hardware_details(node, xmlbytes, 0) + node = reload_object(node) + self.assertEqual(4096, node.memory) + + def test_hardware_updates_memory_lenovo(self): + node = factory.make_Node() + xmlbytes = dedent("""\ + + + + 4294967296 + + + 3221225472 + + + + + 536870912 + + + + + """).encode("utf-8") + update_hardware_details(node, xmlbytes, 0) + node = reload_object(node) + mega = 2 ** 20 + expected = (4294967296 + 3221225472 + 536879812) / mega + self.assertEqual(expected, node.memory) + + def test_hardware_updates_storage(self): + node = factory.make_Node() + xmlbytes = dedent("""\ + + Extended partition + 1 + scsi@0:0.0.0,1 + /dev/sda1 + 8:1 + 127033934848 + + """).encode("utf-8") + update_hardware_details(node, xmlbytes, 0) + 
node = reload_object(node) + self.assertEqual(127033, node.storage) + + def test_hardware_updates_storage_1279728(self): + # Hardware data from bug 1279728. + node = factory.make_Node() + xmlbytes = dedent("""\ + + EXT4 volume + Linux + 1 + scsi@0:0.0.0,1 + /dev/sda1 + 801568677888 + + """).encode("utf-8") + update_hardware_details(node, xmlbytes, 0) + node = reload_object(node) + self.assertEqual(801568, node.storage) + + def test_hardware_updates_storage_1387380_bad(self): + # Hardware data from bug 1387380 (the "bad" node). + node = factory.make_Node() + xmlbytes = dedent("""\ + + 120034123776 + + """).encode("utf-8") + update_hardware_details(node, xmlbytes, 0) + node = reload_object(node) + self.assertEqual(120034, node.storage) + + def test_hardware_updates_storage_1387380_good(self): + # Hardware data from bug 1387380 (the "good" node). + node = factory.make_Node() + xmlbytes = dedent("""\ + + 120034123776 + + 120033075200 + 120033075200 + + + """).encode("utf-8") + update_hardware_details(node, xmlbytes, 0) + node = reload_object(node) + self.assertEqual(120034, node.storage) + + def test_hardware_updates_ignores_empty_tags(self): + # Tags with empty definitions are ignored when + # update_hardware_details gets called. + factory.make_Tag(definition='') + node = factory.make_Node() + node.save() + xmlbytes = ''.encode("utf-8") + update_hardware_details(node, xmlbytes, 0) + node = reload_object(node) + # The real test is that update_hardware_details does not blow + # up, see bug 1131418. + self.assertEqual([], list(node.tags.all())) + + def test_hardware_updates_logs_invalid_xml(self): + logger = self.useFixture(FakeLogger()) + update_hardware_details(factory.make_Node(), b"garbage", 0) + expected_log = dedent("""\ + Invalid lshw data. + Traceback (most recent call last): + ... 
+ XMLSyntaxError: Start tag expected, '<' not found, line 1, column 1 + """) + self.assertThat( + logger.output, DocTestMatches( + expected_log, self.doctest_flags)) + + def test_hardware_updates_does_nothing_when_exit_status_is_not_zero(self): + logger = self.useFixture(FakeLogger(name='commissioningscript')) + update_hardware_details(factory.make_Node(), b"garbage", exit_status=1) + self.assertEqual("", logger.output) + + +class TestGetBuiltinCommissioningScripts(MAASServerTestCase): + + def test__includes_all_builtin_commissioning_scripts_by_default(self): + self.assertItemsEqual( + BUILTIN_COMMISSIONING_SCRIPTS, + get_builtin_commissioning_scripts(), + ) + + def test__excludes_dhcp_discovery_when_disabled(self): + Config.objects.set_config( + 'enable_dhcp_discovery_on_unconfigured_interfaces', False) + self.assertNotIn( + DHCP_UNCONFIGURED_INTERFACES_NAME, + get_builtin_commissioning_scripts()) + + def test__includes_dhcp_discovery_when_enabled(self): + Config.objects.set_config( + 'enable_dhcp_discovery_on_unconfigured_interfaces', True) + self.assertIn( + DHCP_UNCONFIGURED_INTERFACES_NAME, + get_builtin_commissioning_scripts()) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/models/tests/test_nodeuserdata.py maas-1.7.6+bzr3376/src/metadataserver/models/tests/test_nodeuserdata.py --- maas-1.5.4+bzr2294/src/metadataserver/models/tests/test_nodeuserdata.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/models/tests/test_nodeuserdata.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for :class:`NodeUserData` and manager.""" @@ -23,59 +23,84 @@ """Test NodeUserDataManager.""" def test_set_user_data_creates_new_nodeuserdata_if_needed(self): - node = factory.make_node() + node = factory.make_Node() data = b'foo' NodeUserData.objects.set_user_data(node, data) self.assertEqual(data, NodeUserData.objects.get(node=node).data) def test_set_user_data_overwrites_existing_userdata(self): - node = factory.make_node() + node = factory.make_Node() data = b'bar' NodeUserData.objects.set_user_data(node, b'old data') NodeUserData.objects.set_user_data(node, data) self.assertEqual(data, NodeUserData.objects.get(node=node).data) def test_set_user_data_leaves_data_for_other_nodes_alone(self): - node = factory.make_node() + node = factory.make_Node() NodeUserData.objects.set_user_data(node, b'intact') - NodeUserData.objects.set_user_data(factory.make_node(), b'unrelated') + NodeUserData.objects.set_user_data(factory.make_Node(), b'unrelated') self.assertEqual(b'intact', NodeUserData.objects.get(node=node).data) def test_set_user_data_to_None_removes_user_data(self): - node = factory.make_node() + node = factory.make_Node() NodeUserData.objects.set_user_data(node, b'original') NodeUserData.objects.set_user_data(node, None) self.assertItemsEqual([], NodeUserData.objects.filter(node=node)) def test_set_user_data_to_None_when_none_exists_does_nothing(self): - node = factory.make_node() + node = factory.make_Node() NodeUserData.objects.set_user_data(node, None) self.assertItemsEqual([], NodeUserData.objects.filter(node=node)) def test_get_user_data_retrieves_data(self): - node = factory.make_node() + node = factory.make_Node() data = b'splat' NodeUserData.objects.set_user_data(node, data) self.assertEqual(data, NodeUserData.objects.get_user_data(node)) def test_get_user_data_raises_DoesNotExist_if_not_found(self): - node = factory.make_node() + node = factory.make_Node() self.assertRaises( NodeUserData.DoesNotExist, NodeUserData.objects.get_user_data, node) 
def test_get_user_data_ignores_other_nodes(self): - node = factory.make_node() + node = factory.make_Node() data = b'bzzz' NodeUserData.objects.set_user_data(node, data) - NodeUserData.objects.set_user_data(factory.make_node(), b'unrelated') + NodeUserData.objects.set_user_data(factory.make_Node(), b'unrelated') self.assertEqual(data, NodeUserData.objects.get_user_data(node)) def test_has_user_data_returns_False_if_node_has_no_user_data(self): self.assertFalse( - NodeUserData.objects.has_user_data(factory.make_node())) + NodeUserData.objects.has_user_data(factory.make_Node())) def test_has_user_data_returns_True_if_node_has_user_data(self): - node = factory.make_node() + node = factory.make_Node() NodeUserData.objects.set_user_data(node, b"This node has user data.") self.assertTrue(NodeUserData.objects.has_user_data(node)) + + def test_bulk_set_user_data(self): + nodes = [factory.make_Node() for _ in xrange(5)] + data = factory.make_bytes() + NodeUserData.objects.bulk_set_user_data(nodes, data) + for node in nodes: + self.assertEqual(data, NodeUserData.objects.get_user_data(node)) + + def test_bulk_set_user_data_only_deletes_when_data_is_None(self): + nodes = [factory.make_Node() for _ in xrange(5)] + NodeUserData.objects.bulk_set_user_data(nodes, None) + for node in nodes: + self.assertRaises( + NodeUserData.DoesNotExist, + NodeUserData.objects.get_user_data, node) + + def test_bulk_set_user_data_with_preexisting_data(self): + nodes = [factory.make_Node() for _ in xrange(2)] + data1 = factory.make_bytes() + NodeUserData.objects.bulk_set_user_data(nodes, data1) + nodes.extend(factory.make_Node() for _ in xrange(3)) + data2 = factory.make_bytes() + NodeUserData.objects.bulk_set_user_data(nodes, data2) + for node in nodes: + self.assertEqual(data2, NodeUserData.objects.get_user_data(node)) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/tests/test_address.py maas-1.7.6+bzr3376/src/metadataserver/tests/test_address.py --- 
maas-1.5.4+bzr2294/src/metadataserver/tests/test_address.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/tests/test_address.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test server-address-guessing logic.""" @@ -14,8 +14,10 @@ __metaclass__ = type __all__ = [] +import random from socket import gethostname +from maastesting.factory import factory from maastesting.testcase import MAASTestCase from metadataserver import address from testtools.matchers import MatchesRegex @@ -115,17 +117,48 @@ def test_get_ip_address_finds_IP_address_of_interface(self): self.assertEqual('127.0.0.1', address.get_ip_address(b'lo')) + def test_get_ip_address_prefers_v4_addresses_to_v6(self): + addresses = [factory.make_ipv6_address() for _ in range(3)] + # We add a deliberately low v6 address to show that the v4 + # address is always preferred. 
+ ipv6_address = "::1" + ipv4_address = factory.make_ipv4_address() + addresses.append(ipv6_address) + addresses.append(ipv4_address) + self.patch( + address, 'get_all_addresses_for_interface').return_value = ( + addresses) + self.assertEqual(ipv4_address, address.get_ip_address(b'lo')) + + def test_get_ip_address_returns_v6_address_if_no_v4_available(self): + ipv6_address = factory.make_ipv6_address() + self.patch( + address, 'get_all_addresses_for_interface').return_value = ( + [ipv6_address]) + self.assertEqual(ipv6_address, address.get_ip_address(b'lo')) + + def test_get_ip_address_returns_consistent_result_from_address_set(self): + addresses = [factory.make_ipv6_address() for _ in range(5)] + expected_address = sorted(addresses)[0] + for _ in range(5): + random.shuffle(addresses) + self.patch( + address, 'get_all_addresses_for_interface').return_value = ( + addresses) + self.assertEqual( + expected_address, address.get_ip_address(b'lo')) + def test_get_ip_address_returns_None_on_failure(self): self.assertIsNone(address.get_ip_address(b'ethturboveyronsuper9')) - def test_guess_server_address_finds_IP_address(self): + def test_guess_server_host_finds_IP_address(self): self.assertThat( - address.guess_server_address(), + address.guess_server_host(), MatchesRegex("^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$")) - def test_guess_server_address_returns_hostname_as_last_ditch_guess(self): + def test_guess_server_host_returns_hostname_as_last_ditch_guess(self): def return_empty_list(*args): return [] self.patch(address, 'get_command_output', return_empty_list) - self.assertEqual(gethostname(), address.guess_server_address()) + self.assertEqual(gethostname(), address.guess_server_host()) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/tests/test_api.py maas-1.7.6+bzr3376/src/metadataserver/tests/test_api.py --- maas-1.5.4+bzr2294/src/metadataserver/tests/test_api.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/tests/test_api.py 2015-07-10 
01:27:14.000000000 +0000 @@ -25,6 +25,8 @@ from django.conf import settings from django.core.exceptions import PermissionDenied from django.core.urlresolvers import reverse +from maasserver import preseed as preseed_module +from maasserver.clusterrpc.testing.boot_images import make_rpc_boot_image from maasserver.enum import ( NODE_STATUS, NODEGROUP_STATUS, @@ -35,15 +37,21 @@ Unauthorized, ) from maasserver.models import ( + Event, SSHKey, Tag, ) -from maasserver.testing import reload_object +from maasserver.models.node import Node +from maasserver.rpc.testing.mixins import PreseedRPCMixin from maasserver.testing.factory import factory from maasserver.testing.oauthclient import OAuthAuthenticatedClient +from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maastesting.djangotestcase import DjangoTestCase -from maastesting.matchers import MockCalledOnceWith +from maastesting.matchers import ( + MockCalledOnceWith, + MockNotCalled, + ) from maastesting.utils import sample_binary_data from metadataserver import api from metadataserver.api import ( @@ -54,11 +62,12 @@ make_list_response, make_text_response, MetaDataHandler, + poweroff as api_poweroff, UnknownMetadataVersion, ) from metadataserver.models import ( - NodeCommissionResult, NodeKey, + NodeResult, NodeUserData, ) from metadataserver.models.commissioningscript import ARCHIVE_PREFIX @@ -68,6 +77,10 @@ Mock, ) from netaddr import IPNetwork +from provisioningserver.events import ( + EVENT_DETAILS, + EVENT_TYPES, + ) from testtools.matchers import ( Contains, ContainsAll, @@ -106,7 +119,7 @@ self.assertRaises(UnknownMetadataVersion, check_version, '1.0') def test_get_node_for_request_finds_node(self): - node = factory.make_node() + node = factory.make_Node() token = NodeKey.objects.get_token_for_node(node) request = self.fake_request( HTTP_AUTHORIZATION=factory.make_oauth_header( @@ -121,24 +134,24 @@ def 
test_get_node_for_mac_refuses_if_anonymous_access_disabled(self): self.patch(settings, 'ALLOW_UNSAFE_METADATA_ACCESS', False) self.assertRaises( - PermissionDenied, get_node_for_mac, factory.getRandomMACAddress()) + PermissionDenied, get_node_for_mac, factory.make_mac_address()) def test_get_node_for_mac_raises_404_for_unknown_mac(self): self.assertRaises( - MAASAPINotFound, get_node_for_mac, factory.getRandomMACAddress()) + MAASAPINotFound, get_node_for_mac, factory.make_mac_address()) def test_get_node_for_mac_finds_node_by_mac(self): - mac = factory.make_mac_address() + mac = factory.make_MACAddress_with_Node() self.assertEqual(mac.node, get_node_for_mac(mac.mac_address)) def test_get_queried_node_looks_up_by_mac_if_given(self): - mac = factory.make_mac_address() + mac = factory.make_MACAddress_with_Node() self.assertEqual( mac.node, get_queried_node(object(), for_mac=mac.mac_address)) def test_get_queried_node_looks_up_oauth_key_by_default(self): - node = factory.make_node() + node = factory.make_Node() token = NodeKey.objects.get_token_for_node(node) request = self.fake_request( HTTP_AUTHORIZATION=factory.make_oauth_header( @@ -149,7 +162,7 @@ def make_node_client(node=None): """Create a test client logged in as if it were `node`.""" if node is None: - node = factory.make_node() + node = factory.make_Node() token = NodeKey.objects.get_token_for_node(node) return OAuthAuthenticatedClient(get_node_init_user(), token) @@ -166,7 +179,7 @@ to the "signal" call. 
""" if client is None: - client = make_node_client(factory.make_node( + client = make_node_client(factory.make_Node( status=NODE_STATUS.COMMISSIONING)) params = { 'op': 'signal', @@ -240,7 +253,7 @@ self.assertNotIn('user-data', items) def test_version_index_shows_user_data_if_available(self): - node = factory.make_node() + node = factory.make_Node() NodeUserData.objects.set_user_data(node, b"User data for node") client = make_node_client(node) view_name = self.get_metadata_name('-version') @@ -251,7 +264,7 @@ def test_meta_data_view_lists_fields(self): # Some fields only are returned if there is data related to them. user, _ = factory.make_user_with_keys(n_keys=2, username='my-user') - node = factory.make_node(owner=user) + node = factory.make_Node(owner=user) client = make_node_client(node=node) view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', '']) @@ -281,12 +294,12 @@ self.assertNotIn(None, producers) def test_meta_data_local_hostname_returns_fqdn(self): - nodegroup = factory.make_node_group( + nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ACCEPTED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) - hostname = factory.getRandomString() - domain = factory.getRandomString() - node = factory.make_node( + hostname = factory.make_string() + domain = factory.make_string() + node = factory.make_Node( hostname='%s.%s' % (hostname, domain), nodegroup=nodegroup) client = make_node_client(node) view_name = self.get_metadata_name('-meta-data') @@ -298,7 +311,7 @@ self.assertIn('text/plain', response['Content-Type']) def test_meta_data_instance_id_returns_system_id(self): - node = factory.make_node() + node = factory.make_Node() client = make_node_client(node) view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', 'instance-id']) @@ -318,7 +331,7 @@ def test_public_keys_listed_for_node_with_public_keys(self): user, _ = factory.make_user_with_keys(n_keys=2, username='my-user') - 
node = factory.make_node(owner=user) + node = factory.make_Node(owner=user) view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', '']) client = make_node_client(node=node) @@ -337,7 +350,7 @@ def test_public_keys_for_node_returns_list_of_keys(self): user, _ = factory.make_user_with_keys(n_keys=2, username='my-user') - node = factory.make_node(owner=user) + node = factory.make_Node(owner=user) view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', 'public-keys']) client = make_node_client(node=node) @@ -354,7 +367,7 @@ # The metadata service also accepts urls with any number of additional # slashes after 'metadata': e.g. http://host/metadata///rest-of-url. user, _ = factory.make_user_with_keys(n_keys=2, username='my-user') - node = factory.make_node(owner=user) + node = factory.make_Node(owner=user) view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', 'public-keys']) # Insert additional slashes. 
@@ -371,7 +384,7 @@ """Tests for the metadata user-data API endpoint.""" def test_user_data_view_returns_binary_data(self): - node = factory.make_node() + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) NodeUserData.objects.set_user_data(node, sample_binary_data) client = make_node_client(node) response = client.get(reverse('metadata-user-data', args=['latest'])) @@ -381,22 +394,60 @@ (httplib.OK, sample_binary_data), (response.status_code, response.content)) + def test_poweroff_user_data_returned_if_unexpected_status(self): + node = factory.make_Node(status=NODE_STATUS.READY) + NodeUserData.objects.set_user_data(node, sample_binary_data) + client = make_node_client(node) + user_data = factory.make_name('user data').encode("ascii") + self.patch(api_poweroff, 'generate_user_data').return_value = user_data + response = client.get(reverse('metadata-user-data', args=['latest'])) + self.assertEqual('application/octet-stream', response['Content-Type']) + self.assertIsInstance(response.content, bytes) + self.assertEqual( + (httplib.OK, user_data), + (response.status_code, response.content)) + def test_user_data_for_node_without_user_data_returns_not_found(self): - client = make_node_client() + client = make_node_client( + factory.make_Node(status=NODE_STATUS.COMMISSIONING)) response = client.get(reverse('metadata-user-data', args=['latest'])) self.assertEqual(httplib.NOT_FOUND, response.status_code) -class TestCurtinMetadataUserData(DjangoTestCase): +class TestMetadataUserDataStateChanges(MAASServerTestCase): + """Tests for the metadata user-data API endpoint.""" + + def test_request_does_not_cause_status_change_if_not_deploying(self): + status = factory.pick_enum( + NODE_STATUS, but_not=[NODE_STATUS.DEPLOYING]) + node = factory.make_Node(status=status) + NodeUserData.objects.set_user_data(node, sample_binary_data) + client = make_node_client(node) + response = client.get(reverse('metadata-user-data', args=['latest'])) + self.assertEqual(httplib.OK, 
response.status_code) + self.assertEqual(status, reload_object(node).status) + + def test_request_causes_status_change_if_deploying(self): + node = factory.make_Node(status=NODE_STATUS.DEPLOYING) + NodeUserData.objects.set_user_data(node, sample_binary_data) + client = make_node_client(node) + response = client.get(reverse('metadata-user-data', args=['latest'])) + self.assertEqual(httplib.OK, response.status_code) + self.assertEqual(NODE_STATUS.DEPLOYED, reload_object(node).status) + + +class TestCurtinMetadataUserData(PreseedRPCMixin, DjangoTestCase): """Tests for the curtin-metadata user-data API endpoint.""" def test_curtin_user_data_view_returns_curtin_data(self): - node = factory.make_node() + node = factory.make_Node(nodegroup=self.rpc_nodegroup, mac=True) + factory.make_NodeGroupInterface( + node.nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) arch, subarch = node.architecture.split('/') - factory.make_boot_image( - architecture=arch, subarchitecture=subarch, - release=node.get_distro_series(), purpose='xinstall', - nodegroup=node.nodegroup) + boot_image = make_rpc_boot_image(purpose='xinstall') + self.patch( + preseed_module, + 'get_boot_images_for').return_value = [boot_image] client = make_node_client(node) response = client.get( reverse('curtin-metadata-user-data', args=['latest'])) @@ -405,10 +456,77 @@ self.assertIn("PREFIX='curtin'", response.content) +class TestInstallingAPI(MAASServerTestCase): + + def test_other_user_than_node_cannot_signal_installation_result(self): + node = factory.make_Node(status=NODE_STATUS.DEPLOYING) + client = OAuthAuthenticatedClient(factory.make_User()) + response = call_signal(client) + self.assertEqual(httplib.FORBIDDEN, response.status_code) + self.assertEqual( + NODE_STATUS.DEPLOYING, reload_object(node).status) + + def test_signaling_installation_result_does_not_affect_other_node(self): + node = factory.make_Node(status=NODE_STATUS.DEPLOYING) + client = make_node_client( + 
node=factory.make_Node(status=NODE_STATUS.DEPLOYING)) + response = call_signal(client, status='OK') + self.assertEqual(httplib.OK, response.status_code) + self.assertEqual( + NODE_STATUS.DEPLOYING, reload_object(node).status) + + def test_signaling_installation_success_leaves_node_deploying(self): + node = factory.make_Node(mac=True, status=NODE_STATUS.DEPLOYING) + client = make_node_client(node=node) + response = call_signal(client, status='OK') + self.assertEqual(httplib.OK, response.status_code) + self.assertEqual(NODE_STATUS.DEPLOYING, reload_object(node).status) + + def test_signaling_installation_success_is_idempotent(self): + node = factory.make_Node(status=NODE_STATUS.DEPLOYING) + client = make_node_client(node=node) + call_signal(client, status='OK') + response = call_signal(client, status='OK') + self.assertEqual(httplib.OK, response.status_code) + self.assertEqual(NODE_STATUS.DEPLOYING, reload_object(node).status) + + def test_signaling_installation_success_does_not_clear_owner(self): + node = factory.make_Node( + status=NODE_STATUS.DEPLOYING, owner=factory.make_User()) + client = make_node_client(node=node) + response = call_signal(client, status='OK') + self.assertEqual(httplib.OK, response.status_code) + self.assertEqual(node.owner, reload_object(node).owner) + + def test_signaling_installation_failure_makes_node_failed(self): + node = factory.make_Node( + status=NODE_STATUS.DEPLOYING, owner=factory.make_User()) + client = make_node_client(node=node) + response = call_signal(client, status='FAILED') + self.assertEqual(httplib.OK, response.status_code) + self.assertEqual( + NODE_STATUS.FAILED_DEPLOYMENT, reload_object(node).status) + + def test_signaling_installation_failure_is_idempotent(self): + node = factory.make_Node( + status=NODE_STATUS.DEPLOYING, owner=factory.make_User()) + client = make_node_client(node=node) + call_signal(client, status='FAILED') + response = call_signal(client, status='FAILED') + self.assertEqual(httplib.OK, 
response.status_code) + self.assertEqual( + NODE_STATUS.FAILED_DEPLOYMENT, reload_object(node).status) + + class TestCommissioningAPI(MAASServerTestCase): + def setUp(self): + super(TestCommissioningAPI, self).setUp() + self.patch(Node, 'stop_transition_monitor') + self.patch(Node, 'delete_host_maps') + def test_commissioning_scripts(self): - script = factory.make_commissioning_script() + script = factory.make_CommissioningScript() response = make_node_client().get( reverse('commissioning-scripts', args=['latest'])) self.assertEqual( @@ -429,17 +547,17 @@ archive.getnames()) def test_other_user_than_node_cannot_signal_commissioning_result(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) - client = OAuthAuthenticatedClient(factory.make_user()) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) + client = OAuthAuthenticatedClient(factory.make_User()) response = call_signal(client) self.assertEqual(httplib.FORBIDDEN, response.status_code) self.assertEqual( NODE_STATUS.COMMISSIONING, reload_object(node).status) def test_signaling_commissioning_result_does_not_affect_other_node(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client( - node=factory.make_node(status=NODE_STATUS.COMMISSIONING)) + node=factory.make_Node(status=NODE_STATUS.COMMISSIONING)) response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code) self.assertEqual( @@ -448,7 +566,7 @@ def test_signaling_commissioning_OK_repopulates_tags(self): populate_tags_for_single_node = self.patch( api, "populate_tags_for_single_node") - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node) response = call_signal(client, status='OK', script_result='0') self.assertEqual(httplib.OK, response.status_code) @@ -458,29 +576,29 @@ MockCalledOnceWith(ANY, node)) def 
test_signaling_requires_status_code(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) url = reverse('metadata-version', args=['latest']) response = client.post(url, {'op': 'signal'}) self.assertEqual(httplib.BAD_REQUEST, response.status_code) def test_signaling_rejects_unknown_status_code(self): - response = call_signal(status=factory.getRandomString()) + response = call_signal(status=factory.make_string()) self.assertEqual(httplib.BAD_REQUEST, response.status_code) def test_signaling_refuses_if_node_in_unexpected_state(self): - node = factory.make_node(status=NODE_STATUS.DECLARED) + node = factory.make_Node(status=NODE_STATUS.NEW) client = make_node_client(node=node) response = call_signal(client) self.assertEqual( ( httplib.CONFLICT, - "Node wasn't commissioning (status is Declared)", + "Node wasn't commissioning/installing (status is New)", ), (response.status_code, response.content)) def test_signaling_accepts_WORKING_status(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal(client, status='WORKING') self.assertEqual(httplib.OK, response.status_code) @@ -488,30 +606,30 @@ NODE_STATUS.COMMISSIONING, reload_object(node).status) def test_signaling_stores_script_result(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) script_result = random.randint(0, 10) - filename = factory.getRandomString() + filename = factory.make_string() response = call_signal( client, script_result=script_result, - files={filename: factory.getRandomString().encode('ascii')}) + files={filename: factory.make_string().encode('ascii')}) self.assertEqual(httplib.OK, response.status_code, response.content) - result = 
NodeCommissionResult.objects.get(node=node) + result = NodeResult.objects.get(node=node) self.assertEqual(script_result, result.script_result) def test_signaling_stores_empty_script_result(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal( client, script_result=random.randint(0, 10), - files={factory.getRandomString(): ''.encode('ascii')}) + files={factory.make_string(): ''.encode('ascii')}) self.assertEqual(httplib.OK, response.status_code, response.content) - result = NodeCommissionResult.objects.get(node=node) + result = NodeResult.objects.get(node=node) self.assertEqual('', result.data) def test_signaling_WORKING_keeps_owner(self): - user = factory.make_user() - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + user = factory.make_User() + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) node.owner = user node.save() client = make_node_client(node=node) @@ -520,14 +638,21 @@ self.assertEqual(user, reload_object(node).owner) def test_signaling_commissioning_success_makes_node_Ready(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code) self.assertEqual(NODE_STATUS.READY, reload_object(node).status) + def test_signalling_commissioning_success_cancels_monitor(self): + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) + client = make_node_client(node=node) + response = call_signal(client, status='OK') + self.assertEqual(httplib.OK, response.status_code, response.content) + self.assertThat(node.stop_transition_monitor, MockCalledOnceWith()) + def test_signaling_commissioning_success_is_idempotent(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = 
factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) call_signal(client, status='OK') response = call_signal(client, status='OK') @@ -535,8 +660,8 @@ self.assertEqual(NODE_STATUS.READY, reload_object(node).status) def test_signaling_commissioning_success_clears_owner(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) - node.owner = factory.make_user() + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) + node.owner = factory.make_User() node.save() client = make_node_client(node=node) response = call_signal(client, status='OK') @@ -544,31 +669,40 @@ self.assertEqual(None, reload_object(node).owner) def test_signaling_commissioning_failure_makes_node_Failed_Tests(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal(client, status='FAILED') self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(NODE_STATUS.FAILED_TESTS, reload_object(node).status) + self.assertEqual( + NODE_STATUS.FAILED_COMMISSIONING, reload_object(node).status) + + def test_signalling_commissioning_failure_cancels_monitor(self): + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) + client = make_node_client(node=node) + response = call_signal(client, status='FAILED') + self.assertEqual(httplib.OK, response.status_code, response.content) + self.assertThat(node.stop_transition_monitor, MockCalledOnceWith()) def test_signaling_commissioning_failure_is_idempotent(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) call_signal(client, status='FAILED') response = call_signal(client, status='FAILED') self.assertEqual(httplib.OK, response.status_code) - self.assertEqual(NODE_STATUS.FAILED_TESTS, reload_object(node).status) + self.assertEqual( + 
NODE_STATUS.FAILED_COMMISSIONING, reload_object(node).status) def test_signaling_commissioning_failure_sets_node_error(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) - error_text = factory.getRandomString() + error_text = factory.make_string() response = call_signal(client, status='FAILED', error=error_text) self.assertEqual(httplib.OK, response.status_code) self.assertEqual(error_text, reload_object(node).error) def test_signaling_commissioning_failure_clears_owner(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) - node.owner = factory.make_user() + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) + node.owner = factory.make_User() node.save() client = make_node_client(node=node) response = call_signal(client, status='FAILED') @@ -576,8 +710,8 @@ self.assertEqual(None, reload_object(node).owner) def test_signaling_no_error_clears_existing_error(self): - node = factory.make_node( - status=NODE_STATUS.COMMISSIONING, error=factory.getRandomString()) + node = factory.make_Node( + status=NODE_STATUS.COMMISSIONING, error=factory.make_string()) client = make_node_client(node=node) response = call_signal(client) self.assertEqual(httplib.OK, response.status_code) @@ -585,9 +719,9 @@ def test_signalling_stores_files_for_any_status(self): statuses = ['WORKING', 'OK', 'FAILED'] - filename = factory.getRandomString() + filename = factory.make_string() nodes = { - status: factory.make_node(status=NODE_STATUS.COMMISSIONING) + status: factory.make_Node(status=NODE_STATUS.COMMISSIONING) for status in statuses} for status, node in nodes.items(): client = make_node_client(node=node) @@ -595,27 +729,27 @@ call_signal( client, status=status, script_result=script_result, - files={filename: factory.getRandomBytes()}) + files={filename: factory.make_bytes()}) self.assertEqual( {status: filename for status in statuses}, { - status: 
NodeCommissionResult.objects.get(node=node).name + status: NodeResult.objects.get(node=node).name for status, node in nodes.items()}) def test_signal_stores_file_contents(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) - text = factory.getRandomString().encode('ascii') + text = factory.make_string().encode('ascii') script_result = random.randint(0, 10) response = call_signal( client, script_result=script_result, files={'file.txt': text}) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( - text, NodeCommissionResult.objects.get_data(node, 'file.txt')) + text, NodeResult.objects.get_data(node, 'file.txt')) def test_signal_stores_binary(self): unicode_text = '<\u2621>' - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) script_result = random.randint(0, 10) response = call_signal( @@ -624,14 +758,13 @@ self.assertEqual(httplib.OK, response.status_code) self.assertEqual( unicode_text.encode("utf-8"), - NodeCommissionResult.objects.get_data(node, 'file.txt')) + NodeResult.objects.get_data(node, 'file.txt')) def test_signal_stores_multiple_files(self): contents = { - factory.getRandomString(): factory.getRandomString().encode( - 'ascii') + factory.make_string(): factory.make_string().encode('ascii') for counter in range(3)} - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) script_result = random.randint(0, 10) response = call_signal( @@ -641,7 +774,7 @@ contents, { result.name: result.data - for result in node.nodecommissionresult_set.all() + for result in node.noderesult_set.all() }) def test_signal_stores_files_up_to_documented_size_limit(self): @@ -649,20 +782,20 @@ # one megabyte. 
What happens above this limit is none of # anybody's business, but files up to this size should work. size_limit = 2 ** 20 - contents = factory.getRandomString(size_limit, spaces=True) - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + contents = factory.make_string(size_limit, spaces=True) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) script_result = random.randint(0, 10) response = call_signal( client, script_result=script_result, files={'output.txt': contents.encode('utf-8')}) self.assertEqual(httplib.OK, response.status_code) - stored_data = NodeCommissionResult.objects.get_data( + stored_data = NodeResult.objects.get_data( node, 'output.txt') self.assertEqual(size_limit, len(stored_data)) def test_signal_stores_virtual_tag_on_node_if_virtual(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) content = 'virtual'.encode('utf-8') response = call_signal( @@ -674,7 +807,7 @@ ["virtual"], [each_tag.name for each_tag in node.tags.all()]) def test_signal_removes_virtual_tag_on_node_if_not_virtual(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) tag, _ = Tag.objects.get_or_create(name='virtual') node.tags.add(tag) client = make_node_client(node=node) @@ -688,7 +821,7 @@ [], [each_tag.name for each_tag in node.tags.all()]) def test_signal_leaves_untagged_physical_node_unaltered(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) content = 'notvirtual'.encode('utf-8') response = call_signal( @@ -698,8 +831,23 @@ node = reload_object(node) self.assertEqual(0, len(node.tags.all())) + def test_signal_current_power_type_mscm_does_not_store_params(self): + node = factory.make_Node( + power_type="mscm", 
status=NODE_STATUS.COMMISSIONING) + client = make_node_client(node=node) + params = dict( + power_address=factory.make_string(), + power_user=factory.make_string(), + power_pass=factory.make_string()) + response = call_signal( + client, power_type="moonshot", power_parameters=json.dumps(params)) + self.assertEqual(httplib.OK, response.status_code, response.content) + node = reload_object(node) + self.assertEqual("mscm", node.power_type) + self.assertNotEqual(params, node.power_parameters) + def test_signal_refuses_bad_power_type(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal(client, power_type="foo") self.assertEqual( @@ -707,12 +855,12 @@ (response.status_code, response.content)) def test_signal_power_type_stores_params(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) params = dict( - power_address=factory.getRandomString(), - power_user=factory.getRandomString(), - power_pass=factory.getRandomString()) + power_address=factory.make_string(), + power_user=factory.make_string(), + power_pass=factory.make_string()) response = call_signal( client, power_type="ipmi", power_parameters=json.dumps(params)) self.assertEqual(httplib.OK, response.status_code, response.content) @@ -721,12 +869,12 @@ self.assertEqual(params, node.power_parameters) def test_signal_power_type_lower_case_works(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) params = dict( - power_address=factory.getRandomString(), - power_user=factory.getRandomString(), - power_pass=factory.getRandomString()) + power_address=factory.make_string(), + power_user=factory.make_string(), + power_pass=factory.make_string()) response = call_signal( 
client, power_type="ipmi", power_parameters=json.dumps(params)) self.assertEqual(httplib.OK, response.status_code, response.content) @@ -735,7 +883,7 @@ params, node.power_parameters) def test_signal_invalid_power_parameters(self): - node = factory.make_node(status=NODE_STATUS.COMMISSIONING) + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal( client, power_type="ipmi", power_parameters="badjson") @@ -743,11 +891,59 @@ (httplib.BAD_REQUEST, "Failed to parse JSON power_parameters"), (response.status_code, response.content)) + def test_signal_clears_dynamic_ip_address_leases_if_not_WORKING(self): + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) + ips = [factory.make_ipv4_address() for _ in range(2)] + self.patch(Node, 'dynamic_ip_addresses').return_value = ips + client = make_node_client(node=node) + response = call_signal(client, status='OK') + self.assertEqual(httplib.OK, response.status_code, response.content) + self.assertThat(node.delete_host_maps, MockCalledOnceWith(set(ips))) + + def test_signal_does_not_clear_dynamic_ip_address_leases_if_WORKING(self): + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) + ips = [factory.make_ipv4_address() for _ in range(2)] + self.patch(Node, 'dynamic_ip_addresses').return_value = ips + client = make_node_client(node=node) + response = call_signal(client, status='WORKING') + self.assertEqual(httplib.OK, response.status_code, response.content) + self.assertThat(node.delete_host_maps, MockNotCalled()) + + def test_signal_doesnt_clear_dynamic_ip_leases_if_not_commissioning(self): + node = factory.make_Node(status=NODE_STATUS.DEPLOYING) + ips = [factory.make_ipv4_address() for _ in range(2)] + self.patch(Node, 'dynamic_ip_addresses').return_value = ips + client = make_node_client(node=node) + response = call_signal(client, status='OK') + self.assertEqual(httplib.OK, response.status_code, response.content) + 
self.assertThat(node.delete_host_maps, MockNotCalled()) + + +class TestDiskErasingAPI(MAASServerTestCase): + + def test_signaling_erasing_failure_makes_node_failed_erasing(self): + node = factory.make_Node( + status=NODE_STATUS.DISK_ERASING, owner=factory.make_User()) + client = make_node_client(node=node) + response = call_signal(client, status='FAILED') + self.assertEqual(httplib.OK, response.status_code) + self.assertEqual( + NODE_STATUS.FAILED_DISK_ERASING, reload_object(node).status) + + def test_signaling_erasing_ok_releases_node(self): + node = factory.make_Node( + status=NODE_STATUS.DISK_ERASING, owner=factory.make_User()) + client = make_node_client(node=node) + response = call_signal(client, status='OK') + self.assertEqual(httplib.OK, response.status_code) + self.assertEqual( + NODE_STATUS.READY, reload_object(node).status) + class TestByMACMetadataAPI(DjangoTestCase): def test_api_retrieves_node_metadata_by_mac(self): - mac = factory.make_mac_address() + mac = factory.make_MACAddress_with_Node() url = reverse( 'metadata-meta-data-by-mac', args=['latest', mac.mac_address, 'instance-id']) @@ -757,8 +953,9 @@ (response.status_code, response.content)) def test_api_retrieves_node_userdata_by_mac(self): - mac = factory.make_mac_address() - user_data = factory.getRandomString().encode('ascii') + node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) + mac = factory.make_MACAddress(node=node) + user_data = factory.make_string().encode('ascii') NodeUserData.objects.set_user_data(mac.node, user_data) url = reverse( 'metadata-user-data-by-mac', args=['latest', mac.mac_address]) @@ -769,7 +966,7 @@ def test_api_normally_disallows_anonymous_node_metadata_access(self): self.patch(settings, 'ALLOW_UNSAFE_METADATA_ACCESS', False) - mac = factory.make_mac_address() + mac = factory.make_MACAddress_with_Node() url = reverse( 'metadata-meta-data-by-mac', args=['latest', mac.mac_address, 'instance-id']) @@ -780,7 +977,7 @@ class TestNetbootOperationAPI(DjangoTestCase): 
def test_netboot_off(self): - node = factory.make_node(netboot=True) + node = factory.make_Node(netboot=True) client = make_node_client(node=node) url = reverse('metadata-version', args=['latest']) response = client.post(url, {'op': 'netboot_off'}) @@ -788,7 +985,7 @@ self.assertFalse(node.netboot, response) def test_netboot_on(self): - node = factory.make_node(netboot=False) + node = factory.make_Node(netboot=False) client = make_node_client(node=node) url = reverse('metadata-version', args=['latest']) response = client.post(url, {'op': 'netboot_on'}) @@ -799,7 +996,7 @@ class TestAnonymousAPI(DjangoTestCase): def test_anonymous_netboot_off(self): - node = factory.make_node(netboot=True) + node = factory.make_Node(netboot=True) anon_netboot_off_url = reverse( 'metadata-node-by-id', args=['latest', node.system_id]) response = self.client.post( @@ -815,7 +1012,7 @@ anon_enlist_preseed_url = reverse( 'metadata-enlist-preseed', args=['latest']) # Fake the preseed so we're just exercising the view. - fake_preseed = factory.getRandomString() + fake_preseed = factory.make_string() self.patch(api, "get_enlist_preseed", Mock(return_value=fake_preseed)) response = self.client.get( anon_enlist_preseed_url, {'op': 'get_enlist_preseed'}) @@ -831,8 +1028,10 @@ def test_anonymous_get_enlist_preseed_detects_request_origin(self): ng_url = 'http://%s' % factory.make_name('host') network = IPNetwork("10.1.1/24") - ip = factory.getRandomIPInNetwork(network) - factory.make_node_group(maas_url=ng_url, network=network) + ip = factory.pick_ip_in_network(network) + factory.make_NodeGroup( + maas_url=ng_url, network=network, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) anon_enlist_preseed_url = reverse( 'metadata-enlist-preseed', args=['latest']) response = self.client.get( @@ -842,12 +1041,12 @@ def test_anonymous_get_preseed(self): # The preseed for a node can be obtained anonymously. 
- node = factory.make_node() + node = factory.make_Node() anon_node_url = reverse( 'metadata-node-by-id', args=['latest', node.system_id]) # Fake the preseed so we're just exercising the view. - fake_preseed = factory.getRandomString() + fake_preseed = factory.make_string() self.patch(api, "get_preseed", lambda node: fake_preseed) response = self.client.get( anon_node_url, {'op': 'get_preseed'}) @@ -860,6 +1059,26 @@ response.content), response) + def test_anoymous_netboot_off_adds_installation_finished_event(self): + node = factory.make_Node(netboot=True) + anon_netboot_off_url = reverse( + 'metadata-node-by-id', args=['latest', node.system_id]) + self.client.post( + anon_netboot_off_url, {'op': 'netboot_off'}) + latest_event = Event.objects.filter(node=node).last() + self.assertEqual( + ( + EVENT_TYPES.NODE_INSTALLATION_FINISHED, + EVENT_DETAILS[ + EVENT_TYPES.NODE_INSTALLATION_FINISHED].description, + "Node disabled netboot", + ), + ( + latest_event.type.name, + latest_event.type.description, + latest_event.description, + )) + class TestEnlistViews(DjangoTestCase): """Tests for the enlistment metadata views.""" @@ -907,7 +1126,7 @@ def test_get_userdata(self): # instance-id must be available ud_url = reverse('enlist-metadata-user-data', args=['latest']) - fake_preseed = factory.getRandomString() + fake_preseed = factory.make_string() self.patch( api, "get_enlist_userdata", Mock(return_value=fake_preseed)) response = self.client.get(ud_url) @@ -921,8 +1140,10 @@ maas_url = 'http://%s' % factory.make_hostname() self.patch(settings, 'DEFAULT_MAAS_URL', maas_url) network = IPNetwork("10.1.1/24") - ip = factory.getRandomIPInNetwork(network) - factory.make_node_group(maas_url=nodegroup_url, network=network) + ip = factory.pick_ip_in_network(network) + factory.make_NodeGroup( + maas_url=nodegroup_url, network=network, + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) url = reverse('enlist-metadata-user-data', args=['latest']) response = self.client.get(url, 
REMOTE_ADDR=ip) self.assertThat( diff -Nru maas-1.5.4+bzr2294/src/metadataserver/tests/test_fields.py maas-1.7.6+bzr3376/src/metadataserver/tests/test_fields.py --- maas-1.5.4+bzr2294/src/metadataserver/tests/test_fields.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/tests/test_fields.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test custom field types.""" @@ -43,7 +43,7 @@ # Bin() returns a base-64 encoded string so that it can be # transmitted in JSON. self.assertEqual(b"", Bin(b"").__emittable__()) - example_bytes = factory.getRandomBytes() + example_bytes = factory.make_bytes() self.assertEqual( b64encode(example_bytes), Bin(example_bytes).__emittable__()) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/urls.py maas-1.7.6+bzr3376/src/metadataserver/urls.py --- maas-1.5.4+bzr2294/src/metadataserver/urls.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/urls.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Metadata API URLs.""" @@ -20,8 +20,8 @@ patterns, url, ) -from maasserver.api_auth import api_auth -from maasserver.api_support import OperationsResource +from maasserver.api.auth import api_auth +from maasserver.api.support import OperationsResource from metadataserver.api import ( AnonMetaDataHandler, CommissioningScriptsHandler, diff -Nru maas-1.5.4+bzr2294/src/metadataserver/user_data/commissioning.py maas-1.7.6+bzr3376/src/metadataserver/user_data/commissioning.py --- maas-1.5.4+bzr2294/src/metadataserver/user_data/commissioning.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/user_data/commissioning.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,32 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""User data generation for Commissioning.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from metadataserver.user_data.snippets import get_userdata_template_dir +from metadataserver.user_data.utils import ( + generate_user_data as _generate_user_data, + ) + + +def generate_user_data(node): + """Produce the main commissioning script. + + :rtype: `bytes` + """ + userdata_dir = get_userdata_template_dir() + result = _generate_user_data( + node, userdata_dir, 'user_data.template', + 'user_data_config.template') + return result diff -Nru maas-1.5.4+bzr2294/src/metadataserver/user_data/disk_erasing.py maas-1.7.6+bzr3376/src/metadataserver/user_data/disk_erasing.py --- maas-1.5.4+bzr2294/src/metadataserver/user_data/disk_erasing.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/user_data/disk_erasing.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,34 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Disk erasing userdata generation.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "generate_user_data", +] + +from metadataserver.user_data.snippets import get_userdata_template_dir +from metadataserver.user_data.utils import ( + generate_user_data as _generate_user_data, + ) + + +def generate_user_data(node): + """Produce the disk erase script. + + :rtype: `bytes` + """ + userdata_dir = get_userdata_template_dir() + result = _generate_user_data( + node, userdata_dir, 'user_data_disk_erasing.template', + 'user_data_config.template') + return result diff -Nru maas-1.5.4+bzr2294/src/metadataserver/user_data/poweroff.py maas-1.7.6+bzr3376/src/metadataserver/user_data/poweroff.py --- maas-1.5.4+bzr2294/src/metadataserver/user_data/poweroff.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/user_data/poweroff.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,34 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Poweroff userdata generation.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "generate_user_data", +] + +from metadataserver.user_data.snippets import get_userdata_template_dir +from metadataserver.user_data.utils import ( + generate_user_data as _generate_user_data, + ) + + +def generate_user_data(node): + """Produce the poweroff script. 
+ + :rtype: `bytes` + """ + userdata_dir = get_userdata_template_dir() + result = _generate_user_data( + node, userdata_dir, 'user_data_poweroff.template', + 'user_data_config.template') + return result diff -Nru maas-1.5.4+bzr2294/src/metadataserver/user_data/snippets.py maas-1.7.6+bzr3376/src/metadataserver/user_data/snippets.py --- maas-1.5.4+bzr2294/src/metadataserver/user_data/snippets.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/user_data/snippets.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,80 @@ +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Low-level routines for access to snippets. + +These are used by the user-data code, but also by `setup.py`. That's why +importing this must not pull in any unnecessary framework modules etc. +""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'list_snippets', + 'read_snippet', + 'strip_name', + 'get_snippet_context', + 'get_userdata_template_dir', + ] + +import os + +from provisioningserver.utils import locate_config +from provisioningserver.utils.fs import read_text_file + + +USERDATA_BASE_DIR = 'templates/commissioning-user-data' + + +def get_userdata_template_dir(): + """Return the absolute location of the userdata + template directory.""" + return locate_config(USERDATA_BASE_DIR) + + +def get_snippet_context(snippets_dir=None, encoding='utf-8'): + """Return the context of all of the snippets.""" + if snippets_dir is None: + snippets_dir = os.path.join(get_userdata_template_dir(), 'snippets') + snippets = { + strip_name(name): read_snippet(snippets_dir, name, encoding=encoding) + for name in list_snippets(snippets_dir) + } + return snippets + + +def read_snippet(snippets_dir, name, encoding='utf-8'): + """Read a snippet file. 
+ + :rtype: `unicode` + """ + return read_text_file(os.path.join(snippets_dir, name), encoding=encoding) + + +def is_snippet(filename): + """Does `filename` represent a valid snippet name?""" + return all([ + not filename.startswith('.'), + filename != '__init__.py', + filename != 'tests', + not filename.endswith('.pyc'), + not filename.endswith('~'), + ]) + + +def list_snippets(snippets_dir): + """List names of available snippets.""" + return filter(is_snippet, os.listdir(snippets_dir)) + + +def strip_name(snippet_name): + """Canonicalize a snippet name.""" + # Dot suffixes do not work well in tempita variable names. + return snippet_name.replace('.', '_') diff -Nru maas-1.5.4+bzr2294/src/metadataserver/user_data/tests/test_commissioning.py maas-1.7.6+bzr3376/src/metadataserver/user_data/tests/test_commissioning.py --- maas-1.5.4+bzr2294/src/metadataserver/user_data/tests/test_commissioning.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/user_data/tests/test_commissioning.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,66 @@ +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Test generation of commissioning user data.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maasserver.preseed import get_preseed_context +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase +from maastesting.matchers import MockCalledWith +from metadataserver.user_data import utils +from metadataserver.user_data.commissioning import generate_user_data +from mock import Mock +from testtools.matchers import ContainsAll + + +class TestCommissioningUserData(MAASServerTestCase): + + def test_generate_user_data_produces_commissioning_script(self): + # generate_user_data produces a commissioning script which contains + # both definitions and use of various commands in python. + node = factory.make_Node() + self.assertThat( + generate_user_data(node), ContainsAll({ + 'maas-get', + 'maas-signal', + 'maas-ipmi-autodetect', + 'def authenticate_headers', + 'def encode_multipart_data', + })) + + def test_nodegroup_passed_to_get_preseed_context(self): + # I don't care about what effect it has, I just want to know + # that it was passed as it can affect the contents of + # `server_host` in the context. + utils.get_preseed_context = Mock( + # Use the real return value as it contains data necessary to + # render the template. + return_value=get_preseed_context()) + node = factory.make_Node() + generate_user_data(node) + self.assertThat( + utils.get_preseed_context, + MockCalledWith(nodegroup=node.nodegroup)) + + def test_generate_user_data_generates_mime_multipart(self): + # The generate_user_data func should create a MIME multipart + # message consisting of cloud-config and x-shellscript + # attachments. 
+ node = factory.make_Node() + self.assertThat( + generate_user_data(node), ContainsAll({ + 'multipart', + 'Content-Type: text/cloud-config', + 'Content-Type: text/x-shellscript', + })) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/user_data/tests/test_disk_erasing.py maas-1.7.6+bzr3376/src/metadataserver/user_data/tests/test_disk_erasing.py --- maas-1.5.4+bzr2294/src/metadataserver/user_data/tests/test_disk_erasing.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/user_data/tests/test_disk_erasing.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,33 @@ +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Test generation of disk erasing user data.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase +from metadataserver.user_data.disk_erasing import generate_user_data +from testtools.matchers import ContainsAll + + +class TestDiskErasingUserData(MAASServerTestCase): + + def test_generate_user_data_produces_disk_erase_script(self): + node = factory.make_Node() + self.assertThat( + generate_user_data(node), ContainsAll({ + 'maas-signal', + 'erase_disks', + 'def authenticate_headers', + 'def encode_multipart_data', + })) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/user_data/tests/test_poweroff.py maas-1.7.6+bzr3376/src/metadataserver/user_data/tests/test_poweroff.py --- maas-1.5.4+bzr2294/src/metadataserver/user_data/tests/test_poweroff.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/user_data/tests/test_poweroff.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,31 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Test generation of poweroff user data.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maasserver.testing.factory import factory +from maasserver.testing.testcase import MAASServerTestCase +from metadataserver.user_data.poweroff import generate_user_data +from testtools.matchers import ContainsAll + + +class TestPoweroffUserData(MAASServerTestCase): + + def test_generate_user_data_produces_poweroff_script(self): + node = factory.make_Node() + self.assertThat( + generate_user_data(node), ContainsAll({ + 'Powering node off', + 'poweroff', + })) diff -Nru maas-1.5.4+bzr2294/src/metadataserver/user_data/tests/test_snippets.py maas-1.7.6+bzr3376/src/metadataserver/user_data/tests/test_snippets.py --- maas-1.5.4+bzr2294/src/metadataserver/user_data/tests/test_snippets.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/user_data/tests/test_snippets.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,79 @@ +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Test the snippets-related support routines for commissioning user data.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import os.path + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from metadataserver.user_data.snippets import ( + get_snippet_context, + is_snippet, + list_snippets, + read_snippet, + strip_name, + ) + + +class TestSnippets(MAASTestCase): + + def test_read_snippet_reads_snippet_file(self): + contents = factory.make_string() + snippet = self.make_file(contents=contents) + self.assertEqual( + contents, + read_snippet(os.path.dirname(snippet), os.path.basename(snippet))) + + def test_strip_name_leaves_simple_names_intact(self): + simple_name = factory.make_string() + self.assertEqual(simple_name, strip_name(simple_name)) + + def test_strip_name_replaces_dots(self): + self.assertEqual('_x_y_', strip_name('.x.y.')) + + def test_is_snippet(self): + are_snippets = { + 'snippet': True, + 'with-dash': True, + 'module.py': True, + '.backup': False, + 'backup~': False, + 'module.pyc': False, + '__init__.pyc': False, + 'tests': False, + } + self.assertEqual( + are_snippets, + {name: is_snippet(name) for name in are_snippets}) + + def test_list_snippets(self): + snippets_dir = self.make_dir() + factory.make_file(snippets_dir, 'snippet') + factory.make_file(snippets_dir, '.backup.pyc') + self.assertItemsEqual(['snippet'], list_snippets(snippets_dir)) + + def test_get_snippet_context(self): + contents = factory.make_string() + snippets_dir = self.make_dir() + factory.make_file(snippets_dir, 'snippet.py', contents=contents) + self.assertItemsEqual( + {'snippet_py': contents}, + get_snippet_context(snippets_dir=snippets_dir)) + + def test_get_snippet_context_empty_if_no_snippets(self): + snippets_dir = self.make_dir() + context = {} + self.assertEqual( + context, get_snippet_context(snippets_dir=snippets_dir)) diff -Nru 
maas-1.5.4+bzr2294/src/metadataserver/user_data/utils.py maas-1.7.6+bzr3376/src/metadataserver/user_data/utils.py --- maas-1.5.4+bzr2294/src/metadataserver/user_data/utils.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/metadataserver/user_data/utils.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,87 @@ +# Copyright 2012-2013 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Generate commissioning user-data from template and code snippets. + +This combines the `user_data.template` and the snippets of code in the +`snippets` directory into the main commissioning script. + +Its contents are not customizable. To inject custom code, use the +:class:`CommissioningScript` model. +""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'generate_user_data', + ] + +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText +import os.path + +from maasserver.preseed import get_preseed_context +from metadataserver.user_data.snippets import get_snippet_context +import tempita + + +ENCODING = 'utf-8' + + +def generate_user_data(node, userdata_dir, + userdata_template_name, config_template_name): + """Produce a user_data script for use by commissioning and other + operations. + + The main template file contains references to so-called ``snippets'' + which are read in here, and substituted. In addition, the regular + preseed context variables are available (such as 'http_proxy'). + + The final result is a MIME multipart message that consists of a + 'cloud-config' part and an 'x-shellscript' part. This allows maximum + flexibility with cloud-init as we read in a template + 'user_data_config.template' to set cloud-init configs before the script + is run. 
+ + :rtype: `bytes` + """ + userdata_template_file = os.path.join( + userdata_dir, userdata_template_name) + config_template_file = os.path.join( + userdata_dir, config_template_name) + userdata_template = tempita.Template.from_filename( + userdata_template_file, encoding=ENCODING) + config_template = tempita.Template.from_filename( + config_template_file, encoding=ENCODING) + # The preseed context is a dict containing various configs that the + # templates can use. + nodegroup = node.nodegroup + preseed_context = get_preseed_context(nodegroup=nodegroup) + preseed_context['node'] = node + + # Render the snippets in the main template. + snippets = get_snippet_context(encoding=ENCODING) + snippets.update(preseed_context) + userdata = userdata_template.substitute(snippets).encode(ENCODING) + + # Render the config. + config = config_template.substitute(preseed_context) + + # Create a MIME multipart message from the config and the userdata. + config_part = MIMEText(config, 'cloud-config', ENCODING) + config_part.add_header( + 'Content-Disposition', 'attachment; filename="config"') + data_part = MIMEText(userdata, 'x-shellscript', ENCODING) + data_part.add_header( + 'Content-Disposition', 'attachment; filename="user_data.sh"') + combined = MIMEMultipart() + combined.attach(config_part) + combined.attach(data_part) + return combined.as_string() diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/amqpclient.py maas-1.7.6+bzr3376/src/provisioningserver/amqpclient.py --- maas-1.5.4+bzr2294/src/provisioningserver/amqpclient.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/amqpclient.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,127 +0,0 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -# Shamelessly cargo-culted from the txlongpoll source. - -""" -Asynchronous client for AMQP using txAMQP. 
-""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - "AMQFactory", - ] - -import os.path - -from twisted.internet.defer import maybeDeferred -from twisted.internet.protocol import ReconnectingClientFactory -from txamqp.client import TwistedDelegate -from txamqp.protocol import AMQClient -from txamqp.queue import Closed -from txamqp.spec import load as load_spec - - -class AMQClientWithCallback(AMQClient): - """ - An C{AMQClient} that notifies connections with a callback. - - @ivar connected_callback: callback called when C{connectionMade} is - called. It takes one argument, the protocol instance itself. - """ - - def __init__(self, connected_callback, *args, **kwargs): - AMQClient.__init__(self, *args, **kwargs) - self.connected_callback = connected_callback - - def connectionMade(self): - AMQClient.connectionMade(self) - self.connected_callback(self) - - -_base_dir = os.path.dirname(os.path.abspath(__file__)) -AMQP0_8_SPEC = load_spec(os.path.join(_base_dir, "specs", "amqp0-8.xml")) -del _base_dir - - -class AMQFactory(ReconnectingClientFactory): - """ - A C{ClientFactory} for C{AMQClient} protocol with reconnecting facilities. - - @ivar user: the user name to use to connect to the AMQP server. - @ivar password: the corresponding password of the user. - @ivar vhost: the AMQP vhost to create connections against. - @ivar connected_callback: callback called when a successful connection - happened. It takes one argument, the channel opened for the connection. - @ivar disconnected_callback: callback called when a previously connected - connection was lost. It takes no argument. 
- """ - protocol = AMQClientWithCallback - initialDelay = 0.01 - - def __init__(self, user, password, vhost, connected_callback, - disconnected_callback, failed_callback, spec=None): - self.user = user - self.password = password - self.vhost = vhost - self.delegate = TwistedDelegate() - if spec is None: - spec = AMQP0_8_SPEC - self.spec = spec - self.connected_callback = connected_callback - self.disconnected_callback = disconnected_callback - self.failed_callback = failed_callback - - def buildProtocol(self, addr): - """ - Create the protocol instance and returns it for letting Twisted - connect it to the transport. - - @param addr: the attributed address, unused for now. - """ - protocol = self.protocol(self.clientConnectionMade, self.delegate, - self.vhost, spec=self.spec) - protocol.factory = self - return protocol - - def clientConnectionMade(self, client): - """ - Called when a connection succeeds: login to the server, and open a - channel against it. - """ - self.resetDelay() - - def started(ignored): - # We don't care about authenticate result as long as it succeeds - return client.channel(1).addCallback(got_channel) - - def got_channel(channel): - return channel.channel_open().addCallback(opened, channel) - - def opened(ignored, channel): - deferred = maybeDeferred( - self.connected_callback, (client, channel)) - deferred.addErrback(catch_closed) - - def catch_closed(failure): - failure.trap(Closed) - - deferred = client.authenticate(self.user, self.password) - return deferred.addCallback(started) - - def clientConnectionLost(self, connector, reason): - ReconnectingClientFactory.clientConnectionLost(self, connector, reason) - self.disconnected_callback() - - def clientConnectionFailed(self, connector, reason): - ReconnectingClientFactory.clientConnectionFailed( - self, connector, reason) - self.failed_callback((connector, reason)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/auth.py maas-1.7.6+bzr3376/src/provisioningserver/auth.py --- 
maas-1.5.4+bzr2294/src/provisioningserver/auth.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/auth.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """API credentials for node-group workers.""" @@ -13,54 +13,26 @@ __metaclass__ = type __all__ = [ - 'get_recorded_api_credentials', - 'get_recorded_nodegroup_uuid', - 'record_api_credentials', - 'record_nodegroup_uuid', + 'get_maas_user_gpghome', ] -from apiclient.creds import convert_string_to_tuple -from provisioningserver import cache +from provisioningserver.path import get_path -# Cache key for the API credentials as last sent by the server. -API_CREDENTIALS_CACHE_KEY = 'api_credentials' -# Cache key for the uuid of the nodegroup that this worker manages. -NODEGROUP_UUID_CACHE_KEY = 'nodegroup_uuid' +def get_maas_user_gpghome(): + """Return the GPG directory for the `maas` user. - -def record_api_credentials(api_credentials): - """Update the recorded API credentials. - - :param api_credentials: Newly received API credentials, in the form of - a single string: consumer key, resource token, and resource seret - separated by colons. + Set $GPGHOME to this value ad-hoc when needed. """ - cache.cache.set(API_CREDENTIALS_CACHE_KEY, api_credentials) - + return get_path('/var/lib/maas/gnupg') -def get_recorded_api_credentials(): - """Return API credentials as last received from the server. - - :return: If credentials have been received, a tuple of - (consumer_key, resource_token, resource_secret) as expected by - :class:`MAASOauth`. Otherwise, None. 
- """ - credentials_string = cache.cache.get(API_CREDENTIALS_CACHE_KEY) - if credentials_string is None: - return None - else: - return convert_string_to_tuple(credentials_string) +cache = {} -def record_nodegroup_uuid(nodegroup_uuid): - """Record the uuid of the nodegroup we manage, as sent by the server.""" - cache.cache.set(NODEGROUP_UUID_CACHE_KEY, nodegroup_uuid) +# Cache key for the API credentials as last sent by the server. +API_CREDENTIALS_CACHE_KEY = 'api_credentials' -def get_recorded_nodegroup_uuid(): - """Return the uuid of this worker's nodegroup, as sent by the server. - If the server has not sent the name yet, returns None. - """ - return cache.cache.get(NODEGROUP_UUID_CACHE_KEY) +# Cache key for the uuid of the nodegroup that this worker manages. +NODEGROUP_UUID_CACHE_KEY = 'nodegroup_uuid' diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/__init__.py maas-1.7.6+bzr3376/src/provisioningserver/boot/__init__.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/__init__.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/__init__.py 2015-07-10 01:27:14.000000000 +0000 @@ -29,9 +29,11 @@ from provisioningserver.boot.tftppath import compose_image_path from provisioningserver.kernel_opts import compose_kernel_command_line from provisioningserver.utils import locate_config +from provisioningserver.utils.network import find_mac_via_arp from provisioningserver.utils.registry import Registry import tempita from tftp.backend import IReader +from twisted.python.context import get from zope.interface import implementer @@ -94,6 +96,16 @@ yield "config.template" +def get_remote_mac(): + """Gets the requestors MAC address from arp cache. + + This is used, when the dhcp lease file is not up-to-date soon enough + to extract the MAC address from the IP address assigned by dhcp. 
+ """ + remote_host, remote_port = get("remote", (None, None)) + return find_mac_via_arp(remote_host) + + class BootMethod: """Skeleton for a boot method.""" @@ -103,6 +115,9 @@ # the dhcpd.conf that is generated. path_prefix = None + # Arches for which this boot method needs to install boot loaders. + bootloader_arches = [] + @abstractproperty def name(self): """Name of the boot method.""" @@ -127,7 +142,7 @@ :param backend: requesting backend :param path: requested path - :returns: dict of match params from path, None if no match + :return: dict of match params from path, None if no match """ @abstractmethod @@ -163,7 +178,7 @@ :param purpose: The boot purpose, e.g. "local". :param arch: Main machine architecture. :param subarch: Sub-architecture, or "generic" if there is none. - :returns: `tempita.Template` + :return: `tempita.Template` """ pxe_templates_dir = self.get_template_dir() for filename in gen_template_filenames(purpose, arch, subarch): @@ -180,7 +195,7 @@ " Purpose: %r, Arch: %r, Subarch: %r\n" "This can happen if you manually power up a node when its " "state is not one that allows it. Is the node in the " - "'Declared' or 'Ready' states? It needs to be Enlisting, " + "'New' or 'Ready' states? It needs to be Enlisting, " "Commissioning or Allocated." 
% ( pxe_templates_dir, purpose, arch, subarch)) @@ -192,7 +207,7 @@ """ def image_dir(params): return compose_image_path( - params.arch, params.subarch, + params.osystem, params.arch, params.subarch, params.release, params.label) def initrd_path(params): @@ -216,6 +231,7 @@ "kernel_params": kernel_params, "kernel_path": kernel_path, } + return namespace @@ -228,6 +244,7 @@ from provisioningserver.boot.uefi import UEFIBootMethod from provisioningserver.boot.powerkvm import PowerKVMBootMethod from provisioningserver.boot.powernv import PowerNVBootMethod +from provisioningserver.boot.windows import WindowsPXEBootMethod builtin_boot_methods = [ @@ -235,6 +252,7 @@ UEFIBootMethod(), PowerKVMBootMethod(), PowerNVBootMethod(), + WindowsPXEBootMethod(), ] for method in builtin_boot_methods: BootMethodRegistry.register_item(method.name, method) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/install_grub.py maas-1.7.6+bzr3376/src/provisioningserver/boot/install_grub.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/install_grub.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/install_grub.py 2015-07-10 01:27:14.000000000 +0000 @@ -21,7 +21,7 @@ from provisioningserver.boot.install_bootloader import make_destination from provisioningserver.config import Config -from provisioningserver.utils import write_text_file +from provisioningserver.utils.fs import write_text_file CONFIG_FILE = """ diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/powerkvm.py maas-1.7.6+bzr3376/src/provisioningserver/boot/powerkvm.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/powerkvm.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/powerkvm.py 2015-07-10 01:27:14.000000000 +0000 @@ -26,10 +26,8 @@ utils, ) from provisioningserver.boot.install_bootloader import install_bootloader -from provisioningserver.utils import ( - call_and_check, - tempdir, - ) +from provisioningserver.utils.fs import 
tempdir +from provisioningserver.utils.shell import call_and_check GRUB_CONFIG = dedent("""\ @@ -43,6 +41,7 @@ name = "powerkvm" template_subdir = None bootloader_path = "bootppc64.bin" + bootloader_arches = ['ppc64el'] arch_octet = "00:0C" def match_path(self, backend, path): diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/powernv.py maas-1.7.6+bzr3376/src/provisioningserver/boot/powernv.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/powernv.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/powernv.py 2015-07-10 01:27:14.000000000 +0000 @@ -22,15 +22,14 @@ BootMethod, BytesReader, get_parameters, + get_remote_mac, ) from provisioningserver.boot.pxe import ( ARP_HTYPE, re_mac_address, ) from provisioningserver.kernel_opts import compose_kernel_command_line -from provisioningserver.utils import find_mac_via_arp from tftp.backend import FilesystemReader -from twisted.python.context import get # The pxelinux.cfg path is prefixed with the architecture for the # PowerNV nodes. This prefix is set by the path-prefix dhcpd option. @@ -62,7 +61,7 @@ """Formats a mac address into the BOOTIF format, expected by the linux kernel.""" mac = mac.replace(':', '-') - mac = mac.upper() + mac = mac.lower() return '%02x-%s' % (ARP_HTYPE.ETHERNET, mac) @@ -74,16 +73,6 @@ arch_octet = "00:0E" path_prefix = "ppc64el/" - def get_remote_mac(self): - """Gets the requestors MAC address from arp cache. - - This is used, when the pxelinux.cfg is requested without the mac - address appended. This is needed to inject the BOOTIF into the - pxelinux.cfg that is returned to the node. 
- """ - remote_host, remote_port = get("remote", (None, None)) - return find_mac_via_arp(remote_host) - def get_params(self, backend, path): """Gets the matching parameters from the requested path.""" match = re_config_file.match(path) @@ -99,14 +88,14 @@ :param backend: requesting backend :param path: requested path - :returns: dict of match params from path, None if no match + :return: dict of match params from path, None if no match """ params = self.get_params(backend, path) if params is None: return None params['arch'] = "ppc64el" if 'mac' not in params: - mac = self.get_remote_mac() + mac = get_remote_mac() if mac is not None: params['mac'] = mac return params diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/pxe.py maas-1.7.6+bzr3376/src/provisioningserver/boot/pxe.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/pxe.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/pxe.py 2015-07-10 01:27:14.000000000 +0000 @@ -26,11 +26,29 @@ get_parameters, ) from provisioningserver.boot.install_bootloader import install_bootloader +from provisioningserver.utils.fs import atomic_symlink - +# Bootloader file names to install. BOOTLOADERS = ['pxelinux.0', 'chain.c32', 'ifcpu64.c32'] -BOOTLOADER_DIR = '/usr/lib/syslinux' +# Possible locations in which to find the bootloader files. Search these +# in this order for each file. (This exists because locations differ +# across Ubuntu releases.) +BOOTLOADER_DIRS = [ + '/usr/lib/PXELINUX', + '/usr/lib/syslinux', + '/usr/lib/syslinux/modules/bios' +] + +# List of possible directories where to find additioning bootloader files. +# The first existing directory will be symlinked to /syslinux/ inside +# the TFTP root directory. +SYSLINUX_DIRS = [ + # Location for syslinux version 6 (the version in Utopic). + '/usr/lib/syslinux/modules/bios', + # Location for syslinux version 4 (the version in Trusty). 
+ '/usr/lib/syslinux' +] class ARP_HTYPE: @@ -76,6 +94,7 @@ name = "pxe" template_subdir = "pxe" + bootloader_arches = ['i386', 'amd64'] bootloader_path = "pxelinux.0" arch_octet = "00:00" @@ -85,7 +104,7 @@ :param backend: requesting backend :param path: requested path - :returns: dict of match params from path, None if no match + :return: dict of match params from path, None if no match """ match = re_config_file.match(path) if match is None: @@ -107,11 +126,37 @@ namespace = self.compose_template_namespace(kernel_params) return BytesReader(template.substitute(namespace).encode("utf-8")) - def install_bootloader(self, destination): - """Installs the required files for PXE booting into the - tftproot. + def locate_bootloader(self, bootloader): + """Search BOOTLOADER_DIRS for bootloader. + + :return: The full file path where the bootloader was found, or None. """ + for dir in BOOTLOADER_DIRS: + filename = os.path.join(dir, bootloader) + if os.path.exists(filename): + return filename + return None + + def locate_syslinux_dir(self): + """Search for an existing directory among SYSLINUX_DIRS.""" + for bootloader_dir in SYSLINUX_DIRS: + if os.path.exists(bootloader_dir): + return bootloader_dir + return None + + def install_bootloader(self, destination): + """Installs the required files and symlinks into the tftproot.""" for bootloader in BOOTLOADERS: - bootloader_src = os.path.join(BOOTLOADER_DIR, bootloader) + # locate_bootloader might return None but happy to let that + # traceback here as it should never happen unless there's a + # serious problem with packaging. + bootloader_src = self.locate_bootloader(bootloader) bootloader_dst = os.path.join(destination, bootloader) install_bootloader(bootloader_src, bootloader_dst) + + # Create /syslinux/ symlink. PXE linux tries this subdirectory + # when trying to fetch files for PXE-booting. 
+ bootloader_dir = self.locate_syslinux_dir() + if bootloader_dir is not None: + atomic_symlink( + bootloader_dir, os.path.join(destination, 'syslinux')) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_boot.py maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_boot.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_boot.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_boot.py 2015-07-10 01:27:14.000000000 +0000 @@ -19,15 +19,22 @@ from fixtures import EnvironmentVariableFixture from maastesting.factory import factory -from maastesting.testcase import MAASTestCase +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import ( + MAASTestCase, + MAASTwistedRunTest, + ) import mock from provisioningserver import boot from provisioningserver.boot import ( BootMethod, BytesReader, gen_template_filenames, + get_remote_mac, ) import tempita +from twisted.internet.defer import inlineCallbacks +from twisted.python import context class FakeBootMethod(BootMethod): @@ -50,6 +57,24 @@ class TestBootMethod(MAASTestCase): """Test for `BootMethod` in `provisioningserver.boot`.""" + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + @inlineCallbacks + def test_get_remote_mac(self): + remote_host = factory.make_ipv4_address() + call_context = { + "local": ( + factory.make_ipv4_address(), + factory.pick_port()), + "remote": ( + remote_host, + factory.pick_port()), + } + + mock_find = self.patch(boot, 'find_mac_via_arp') + yield context.call(call_context, get_remote_mac) + self.assertThat(mock_find, MockCalledOnceWith(remote_host)) + def test_gen_template_filenames(self): purpose = factory.make_name("purpose") arch, subarch = factory.make_names("arch", "subarch") diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_install_bootloader.py maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_install_bootloader.py --- 
maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_install_bootloader.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_install_bootloader.py 2015-07-10 01:27:14.000000000 +0000 @@ -35,7 +35,7 @@ class TestInstallBootloader(MAASTestCase): def test_integration(self): - loader_contents = factory.getRandomString() + loader_contents = factory.make_string() loader = self.make_file(contents=loader_contents) destination = self.make_file() install_bootloader(loader, destination) @@ -53,7 +53,7 @@ self.assertThat(dest, DirExists()) def test_install_bootloader_installs_new_bootloader(self): - contents = factory.getRandomString() + contents = factory.make_string() loader = self.make_file(contents=contents) install_dir = self.make_dir() dest = os.path.join(install_dir, factory.make_name('loader')) @@ -61,14 +61,14 @@ self.assertThat(dest, FileContains(contents)) def test_install_bootloader_replaces_bootloader_if_changed(self): - contents = factory.getRandomString() + contents = factory.make_string() loader = self.make_file(contents=contents) dest = self.make_file(contents="Old contents") install_bootloader(loader, dest) self.assertThat(dest, FileContains(contents)) def test_install_bootloader_skips_if_unchanged(self): - contents = factory.getRandomString() + contents = factory.make_string() dest = self.make_file(contents=contents) age_file(dest, 100) original_write_time = get_write_time(dest) @@ -78,7 +78,7 @@ self.assertEqual(original_write_time, get_write_time(dest)) def test_install_bootloader_sweeps_aside_dot_new_if_any(self): - contents = factory.getRandomString() + contents = factory.make_string() loader = self.make_file(contents=contents) dest = self.make_file(contents="Old contents") temp_file = '%s.new' % dest diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_install_grub.py maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_install_grub.py --- 
maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_install_grub.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_install_grub.py 2015-07-10 01:27:14.000000000 +0000 @@ -21,7 +21,7 @@ import provisioningserver.boot.install_grub from provisioningserver.boot.tftppath import locate_tftp_path from provisioningserver.testing.config import set_tftp_root -from provisioningserver.utils import MainScript +from provisioningserver.utils.script import MainScript from testtools.matchers import FileExists diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_powerkvm.py maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_powerkvm.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_powerkvm.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_powerkvm.py 2015-07-10 01:27:14.000000000 +0000 @@ -37,7 +37,7 @@ def test_match_path_returns_None(self): method = PowerKVMBootMethod() - paths = [factory.getRandomString() for _ in range(3)] + paths = [factory.make_string() for _ in range(3)] for path in paths: self.assertEqual(None, method.match_path(None, path)) @@ -56,7 +56,7 @@ def test_install_bootloader(self): method = PowerKVMBootMethod() filename = factory.make_name('dpkg') - data = factory.getRandomString() + data = factory.make_string() tmp = self.make_dir() dest = self.make_dir() diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_powernv.py maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_powernv.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_powernv.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_powernv.py 2015-07-10 01:27:14.000000000 +0000 @@ -19,7 +19,10 @@ from maastesting.factory import factory from maastesting.testcase import MAASTestCase -from provisioningserver.boot import BytesReader +from provisioningserver.boot import ( + BytesReader, 
+ powernv as powernv_module, + ) from provisioningserver.boot.powernv import ( ARP_HTYPE, format_bootif, @@ -28,9 +31,9 @@ ) from provisioningserver.boot.tests.test_pxe import parse_pxe_config from provisioningserver.boot.tftppath import compose_image_path +from provisioningserver.pserv_services.tftp import TFTPBackend from provisioningserver.testing.config import set_tftp_root from provisioningserver.tests.test_kernel_opts import make_kernel_parameters -from provisioningserver.tftp import TFTPBackend from testtools.matchers import ( IsInstance, MatchesAll, @@ -65,7 +68,7 @@ The path is intended to match `re_config_file`, and the components are the expected groups from a match. """ - components = {"mac": factory.getRandomMACAddress("-")} + components = {"mac": factory.make_mac_address("-")} config_path = compose_config_path(components["mac"]) return config_path, components @@ -133,8 +136,8 @@ def test_match_path_pxe_config_without_mac(self): method = PowerNVBootMethod() - fake_mac = factory.getRandomMACAddress() - self.patch(method, 'get_remote_mac').return_value = fake_mac + fake_mac = factory.make_mac_address() + self.patch(powernv_module, 'get_remote_mac').return_value = fake_mac config_path = 'ppc64el/pxelinux.cfg/default' params = method.match_path(None, config_path) expected = { @@ -145,8 +148,8 @@ def test_match_path_pxe_prefix_request(self): method = PowerNVBootMethod() - fake_mac = factory.getRandomMACAddress() - self.patch(method, 'get_remote_mac').return_value = fake_mac + fake_mac = factory.make_mac_address() + self.patch(powernv_module, 'get_remote_mac').return_value = fake_mac file_path = 'ppc64el/file' params = method.match_path(None, file_path) expected = { @@ -176,7 +179,7 @@ self.assertThat(output, StartsWith("DEFAULT ")) # The PXE parameters are all set according to the options. 
image_dir = compose_image_path( - arch=params.arch, subarch=params.subarch, + osystem=params.osystem, arch=params.arch, subarch=params.subarch, release=params.release, label=params.label) self.assertThat( output, MatchesAll( @@ -220,7 +223,7 @@ def test_get_reader_appends_bootif(self): method = PowerNVBootMethod() - fake_mac = factory.getRandomMACAddress() + fake_mac = factory.make_mac_address() params = make_kernel_parameters(self, purpose="install") output = method.get_reader( backend=None, kernel_params=params, arch='ppc64el', mac=fake_mac) @@ -229,6 +232,19 @@ expected = 'BOOTIF=%s' % format_bootif(fake_mac) self.assertIn(expected, config['execute']['APPEND']) + def test_format_bootif_replaces_colon(self): + fake_mac = factory.make_mac_address() + self.assertEqual( + '01-%s' % fake_mac.replace(':', '-').lower(), + format_bootif(fake_mac)) + + def test_format_bootif_makes_mac_address_lower(self): + fake_mac = factory.make_mac_address() + fake_mac = fake_mac.upper() + self.assertEqual( + '01-%s' % fake_mac.replace(':', '-').lower(), + format_bootif(fake_mac)) + class TestPowerNVBootMethodPathPrefix(MAASTestCase): """Tests for @@ -236,7 +252,7 @@ """ def test_get_reader_path_prefix(self): - data = factory.getRandomString().encode("ascii") + data = factory.make_string().encode("ascii") temp_file = self.make_file(name="example", contents=data) temp_dir = os.path.dirname(temp_file) backend = TFTPBackend(temp_dir, "http://nowhere.example.com/") @@ -253,7 +269,7 @@ self.assertEqual(b"", reader.read(1)) def test_get_reader_path_prefix_only_removes_first_occurrence(self): - data = factory.getRandomString().encode("ascii") + data = factory.make_string().encode("ascii") temp_dir = self.make_dir() temp_subdir = os.path.join(temp_dir, 'ppc64el') os.mkdir(temp_subdir) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_pxe.py maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_pxe.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_pxe.py 
2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_pxe.py 2015-07-10 01:27:14.000000000 +0000 @@ -15,14 +15,21 @@ __all__ = [] from collections import OrderedDict +import os import re from maastesting.factory import factory +from maastesting.matchers import MockCallsMatch from maastesting.testcase import MAASTestCase +import mock from provisioningserver import kernel_opts -from provisioningserver.boot import BytesReader +from provisioningserver.boot import ( + BytesReader, + pxe as pxe_module, + ) from provisioningserver.boot.pxe import ( ARP_HTYPE, + BOOTLOADERS, PXEBootMethod, re_config_file, ) @@ -36,6 +43,7 @@ MatchesAll, MatchesRegex, Not, + SamePath, StartsWith, ) @@ -67,6 +75,18 @@ self.useFixture(set_tftp_root(tftproot)) return tftproot + def make_dummy_bootloader_sources(self, destination, loader_names): + """install_bootloader requires real files to exist, this method + creates them in the requested location. + + :return: list of created filenames + """ + created = [] + for loader in loader_names: + name = factory.make_file(destination, loader) + created.append(name) + return created + def test_compose_config_path_follows_maas_pxe_directory_layout(self): name = factory.make_name('config') self.assertEqual( @@ -103,6 +123,66 @@ method = PXEBootMethod() self.assertEqual('00:00', method.arch_octet) + def test_locate_bootloader(self): + # Put all the BOOTLOADERS except one in dir1, and the last in + # dir2. 
+ dir1 = self.make_dir() + dir2 = self.make_dir() + dirs = [dir1, dir2] + self.patch(pxe_module, "BOOTLOADER_DIRS", dirs) + self.make_dummy_bootloader_sources(dir1, BOOTLOADERS[:-1]) + [displaced_loader] = self.make_dummy_bootloader_sources( + dir2, BOOTLOADERS[-1:]) + method = PXEBootMethod() + observed = method.locate_bootloader(BOOTLOADERS[-1]) + + self.assertEqual(displaced_loader, observed) + + def test_locate_bootloader_returns_None_if_not_found(self): + method = PXEBootMethod() + self.assertIsNone(method.locate_bootloader("foo")) + + def test_install_bootloader_installs_to_destination(self): + # Disable the symlink creation. + self.patch(pxe_module, "SYSLINUX_DIRS", []) + tftproot = self.make_tftp_root() + source_dir = self.make_dir() + self.patch(pxe_module, "BOOTLOADER_DIRS", [source_dir]) + self.make_dummy_bootloader_sources(source_dir, BOOTLOADERS) + install_bootloader_call = self.patch(pxe_module, "install_bootloader") + method = PXEBootMethod() + method.install_bootloader(tftproot) + + expected = [ + mock.call( + os.path.join(source_dir, bootloader), + os.path.join(tftproot, bootloader) + ) + for bootloader in BOOTLOADERS] + self.assertThat( + install_bootloader_call, + MockCallsMatch(*expected)) + + def test_locate_syslinux_dir_returns_dir(self): + dir1 = self.make_dir() + dir2 = self.make_dir() + dirs = [dir1, dir2] + self.patch(pxe_module, "SYSLINUX_DIRS", dirs) + method = PXEBootMethod() + found_dir = method.locate_syslinux_dir() + self.assertEqual(dir1, found_dir) + + def test_install_bootloader_creates_symlink(self): + # Disable the copying of the bootloaders. 
+ self.patch(pxe_module, "BOOTLOADERS", []) + target_dir = self.make_dir() + self.patch(pxe_module, "SYSLINUX_DIRS", [target_dir]) + tftproot = self.make_tftp_root() + method = PXEBootMethod() + method.install_bootloader(tftproot) + syslinux_dir = os.path.join(tftproot, 'syslinux') + self.assertThat(syslinux_dir, SamePath(target_dir)) + def parse_pxe_config(text): """Parse a PXE config file. @@ -148,8 +228,8 @@ self.assertEqual({}, config) -class TestPXEBootMethodRenderConfig(MAASTestCase): - """Tests for `provisioningserver.boot.pxe.PXEBootMethod.render_config`.""" +class TestPXEBootMethodRender(MAASTestCase): + """Tests for `provisioningserver.boot.pxe.PXEBootMethod.render`.""" def test_get_reader_install(self): # Given the right configuration options, the PXE configuration is @@ -165,7 +245,7 @@ self.assertThat(output, StartsWith("DEFAULT ")) # The PXE parameters are all set according to the options. image_dir = compose_image_path( - arch=params.arch, subarch=params.subarch, + osystem=params.osystem, arch=params.arch, subarch=params.subarch, release=params.release, label=params.label) self.assertThat( output, MatchesAll( @@ -249,10 +329,12 @@ method = PXEBootMethod() get_ephemeral_name = self.patch(kernel_opts, "get_ephemeral_name") get_ephemeral_name.return_value = factory.make_name("ephemeral") + osystem = factory.make_name('osystem') options = { "backend": None, "kernel_params": make_kernel_parameters( - testcase=self, subarch="generic", purpose=self.purpose), + testcase=self, osystem=osystem, subarch="generic", + purpose=self.purpose), } output = method.get_reader(**options).read(10000) config = parse_pxe_config(output) @@ -275,7 +357,8 @@ section = config[section_label] self.assertThat( section, ContainsAll(("KERNEL", "INITRD", "APPEND"))) - contains_arch_path = StartsWith("%s/" % section_label) + contains_arch_path = StartsWith( + "%s/%s/" % (osystem, section_label)) self.assertThat(section["KERNEL"], contains_arch_path) self.assertThat(section["INITRD"], 
contains_arch_path) self.assertIn("APPEND", section) @@ -291,7 +374,7 @@ The path is intended to match `re_config_file`, and the components are the expected groups from a match. """ - components = {"mac": factory.getRandomMACAddress("-"), + components = {"mac": factory.make_mac_address("-"), "arch": None, "subarch": None} config_path = compose_config_path(components["mac"]) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_tftppath.py maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_tftppath.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_tftppath.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_tftppath.py 2015-07-10 01:27:14.000000000 +0000 @@ -18,22 +18,38 @@ import os.path from maastesting.factory import factory +from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from mock import Mock +from provisioningserver import config from provisioningserver.boot import tftppath from provisioningserver.boot.tftppath import ( compose_image_path, drill_down, extend_path, + extract_image_params, + extract_metadata, is_visible_subdir, list_boot_images, list_subdirs, locate_tftp_path, + maas_meta_last_modified, + ) +from provisioningserver.drivers.osystem import OperatingSystemRegistry +from provisioningserver.import_images.boot_image_mapping import ( + BootImageMapping, + ) +from provisioningserver.import_images.helpers import ImageSpec +from provisioningserver.import_images.testing.factory import ( + make_image_spec, + set_resource, ) from provisioningserver.testing.boot_images import ( make_boot_image_storage_params, + make_image, ) from provisioningserver.testing.config import set_tftp_root +from provisioningserver.testing.os import make_osystem from testtools.matchers import ( Not, StartsWith, @@ -41,16 +57,6 @@ from testtools.testcase import ExpectedException -def make_image(params, purpose): - """Describe an image as a dict 
similar to what `list_boot_images` returns. - - The `params` are as returned from `make_boot_image_storage_params`. - """ - image = params.copy() - image['purpose'] = purpose - return image - - class TestTFTPPath(MAASTestCase): def setUp(self): @@ -62,6 +68,7 @@ """Fake a boot image matching `image_params` under `tftproot`.""" image_dir = locate_tftp_path( compose_image_path( + osystem=image_params['osystem'], arch=image_params['architecture'], subarch=image_params['subarchitecture'], release=image_params['release'], @@ -71,22 +78,62 @@ factory.make_file(image_dir, 'linux') factory.make_file(image_dir, 'initrd.gz') + def make_meta_file(self, image_params, image_resource, tftproot): + image = ImageSpec( + os=image_params["osystem"], + arch=image_params["architecture"], + subarch=image_params["subarchitecture"], + release=image_params["release"], label=image_params["label"]) + mapping = BootImageMapping() + mapping.setdefault(image, image_resource) + maas_meta = mapping.dump_json() + with open(os.path.join(tftproot, "maas.meta"), "wb") as f: + f.write(maas_meta) + + def test_maas_meta_last_modified_returns_modification_time(self): + path = factory.make_file(self.tftproot, name="maas.meta") + expected = os.path.getmtime(path) + observed = maas_meta_last_modified(self.tftproot) + self.assertEqual(expected, observed) + + def test_maas_meta_last_modified_returns_None_if_no_file(self): + observed = maas_meta_last_modified( + os.path.join(self.tftproot, "maas.meta")) + self.assertIsNone(observed) + + def test_maas_meta_last_modified_defaults_tftproot(self): + path = factory.make_file(self.tftproot, name="maas.meta") + maas_meta_file_path = self.patch(tftppath, 'maas_meta_file_path') + maas_meta_file_path.return_value = path + maas_meta_last_modified() + expected_path = os.path.join( + config.BOOT_RESOURCES_STORAGE, 'current') + self.assertThat(maas_meta_file_path, MockCalledOnceWith(expected_path)) + + def test_maas_meta_last_modified_reraises_non_ENOENT(self): + oserror = 
OSError() + oserror.errno = errno.E2BIG + self.patch(os.path, 'getmtime').side_effect = oserror + self.assertRaises(OSError, maas_meta_last_modified) + def test_compose_image_path_follows_storage_directory_layout(self): + osystem = factory.make_name('osystem') arch = factory.make_name('arch') subarch = factory.make_name('subarch') release = factory.make_name('release') label = factory.make_name('label') self.assertEqual( - '%s/%s/%s/%s' % (arch, subarch, release, label), - compose_image_path(arch, subarch, release, label)) + '%s/%s/%s/%s/%s' % (osystem, arch, subarch, release, label), + compose_image_path(osystem, arch, subarch, release, label)) def test_compose_image_path_does_not_include_tftp_root(self): + osystem = factory.make_name('osystem') arch = factory.make_name('arch') subarch = factory.make_name('subarch') release = factory.make_name('release') label = factory.make_name('label') self.assertThat( - compose_image_path(arch, subarch, release, label), + compose_image_path(osystem, arch, subarch, release, label), Not(StartsWith(self.tftproot))) def test_locate_tftp_path_prefixes_tftp_root(self): @@ -100,13 +147,13 @@ self.tftproot, locate_tftp_path(None, tftproot=self.tftproot)) def test_list_boot_images_copes_with_missing_directory(self): - self.assertEqual([], list_boot_images(factory.getRandomString())) + self.assertEqual([], list_boot_images(factory.make_string())) def test_list_boot_images_passes_on_other_exceptions(self): error = OSError(errno.EACCES, "Deliberate error for testing.") self.patch(tftppath, 'list_subdirs', Mock(side_effect=error)) with ExpectedException(OSError): - list_boot_images(factory.getRandomString()) + list_boot_images(factory.make_string()) def test_list_boot_images_copes_with_empty_directory(self): self.assertEqual([], list_boot_images(self.tftproot)) @@ -120,22 +167,46 @@ params = make_boot_image_storage_params() self.make_image_dir(params, self.tftproot) purposes = ['install', 'commissioning', 'xinstall'] + make_osystem(self, 
params['osystem'], purposes) self.assertItemsEqual( [make_image(params, purpose) for purpose in purposes], list_boot_images(self.tftproot)) def test_list_boot_images_enumerates_boot_images(self): + purposes = ['install', 'commissioning', 'xinstall'] params = [make_boot_image_storage_params() for counter in range(3)] for param in params: self.make_image_dir(param, self.tftproot) + make_osystem(self, param['osystem'], purposes) self.assertItemsEqual( [ make_image(param, purpose) for param in params - for purpose in ['install', 'commissioning', 'xinstall'] + for purpose in purposes ], list_boot_images(self.tftproot)) + def test_list_boot_images_merges_maas_meta_data(self): + params = make_boot_image_storage_params() + self.make_image_dir(params, self.tftproot) + # The required metadata is called "subarches" in maas.meta + metadata = dict(subarches=factory.make_name("subarches")) + self.make_meta_file(params, metadata, self.tftproot) + purposes = ['install', 'commissioning', 'xinstall'] + make_osystem(self, params['osystem'], purposes) + # The API requires "supported_subarches". 
+ expected_metadata = dict(supported_subarches=metadata["subarches"]) + self.assertItemsEqual( + [make_image(params, purpose, expected_metadata) + for purpose in purposes], + list_boot_images(self.tftproot)) + + def test_list_boot_images_empty_on_missing_osystems(self): + params = [make_boot_image_storage_params() for counter in range(3)] + for param in params: + self.make_image_dir(param, self.tftproot) + self.assertItemsEqual([], list_boot_images(self.tftproot)) + def test_is_visible_subdir_ignores_regular_files(self): plain_file = self.make_file() self.assertFalse( @@ -223,3 +294,179 @@ self.assertEqual( [[deep_dir, subdir]], drill_down(base_dir, [[shallow_dir], [deep_dir]])) + + def test_extract_metadata(self): + resource = dict( + subarches=factory.make_name("subarch"), + other_item=factory.make_name("other"), + ) + image = make_image_spec() + mapping = set_resource(image_spec=image, resource=resource) + metadata = mapping.dump_json() + + # Lack of consistency across maas in naming arch vs architecture + # and subarch vs subarchitecture means I can't just do a simple + # dict parameter expansion here. + params = { + "osystem": image.os, + "architecture": image.arch, + "subarchitecture": image.subarch, + "release": image.release, + "label": image.label, + } + extracted_data = extract_metadata(metadata, params) + + # We only expect the supported_subarches key from the resource data. + expected = dict(supported_subarches=resource["subarches"]) + self.assertEqual(expected, extracted_data) + + def test_extract_metadata_handles_missing_subarch(self): + resource = dict( + other_item=factory.make_name("other"), + ) + image = make_image_spec() + mapping = set_resource(image_spec=image, resource=resource) + metadata = mapping.dump_json() + + # Lack of consistency across maas in naming arch vs architecture + # and subarch vs subarchitecture means I can't just do a simple + # dict parameter expansion here. 
+ params = { + "osystem": image.os, + "architecture": image.arch, + "subarchitecture": image.subarch, + "release": image.release, + "label": image.label, + } + self.assertEqual({}, extract_metadata(metadata, params)) + + def _make_path(self): + osystem = factory.make_name("os") + arch = factory.make_name("arch") + subarch = factory.make_name("subarch") + release = factory.make_name("release") + label = factory.make_name("label") + path = (osystem, arch, subarch, release, label) + return path, osystem, arch, subarch, release, label + + def _patch_osystem_registry(self, values, xinstall_params=None): + get_item = self.patch(OperatingSystemRegistry, "get_item") + item_mock = Mock() + item_mock.get_boot_image_purposes.return_value = values + if xinstall_params is not None: + item_mock.get_xinstall_parameters.return_value = xinstall_params + get_item.return_value = item_mock + + def test_extract_image_params_with_no_metadata(self): + path, osystem, arch, subarch, release, label = self._make_path() + + # Patch OperatingSystemRegistry to return a fixed list of + # values. 
+ purpose1 = factory.make_name("purpose") + purpose2 = factory.make_name("purpose") + xi_purpose = "xinstall" + xi_path = factory.make_name("xi_path") + xi_type = factory.make_name("xi_type") + purposes = [purpose1, purpose2, xi_purpose] + self._patch_osystem_registry( + purposes, xinstall_params=(xi_path, xi_type)) + + params = extract_image_params(path, "") + + self.assertItemsEqual( + [ + { + "osystem": osystem, + "architecture": arch, + "subarchitecture": subarch, + "release": release, + "label": label, + "purpose": purpose1, + "xinstall_path": '', + "xinstall_type": '', + }, + { + "osystem": osystem, + "architecture": arch, + "subarchitecture": subarch, + "release": release, + "label": label, + "purpose": purpose2, + "xinstall_path": '', + "xinstall_type": '', + }, + { + "osystem": osystem, + "architecture": arch, + "subarchitecture": subarch, + "release": release, + "label": label, + "purpose": xi_purpose, + "xinstall_path": xi_path, + "xinstall_type": xi_type, + }, + ], + params) + + def test_extract_image_params_with_metadata(self): + path, osystem, arch, subarch, release, label = self._make_path() + + # Patch OperatingSystemRegistry to return a fixed list of + # values. + purpose1 = factory.make_name("purpose") + purpose2 = factory.make_name("purpose") + xi_purpose = "xinstall" + xi_path = factory.make_name("xi_path") + xi_type = factory.make_name("xi_type") + purposes = [purpose1, purpose2, xi_purpose] + self._patch_osystem_registry( + purposes, xinstall_params=(xi_path, xi_type)) + + # Create some maas.meta content. 
+ image = ImageSpec( + os=osystem, arch=arch, subarch=subarch, release=release, + label=label) + image_resource = dict(subarches=factory.make_name("subarches")) + mapping = BootImageMapping() + mapping.setdefault(image, image_resource) + maas_meta = mapping.dump_json() + + params = extract_image_params(path, maas_meta) + + self.assertItemsEqual( + [ + { + "osystem": osystem, + "architecture": arch, + "subarchitecture": subarch, + "release": release, + "label": label, + "purpose": purpose1, + "xinstall_path": '', + "xinstall_type": '', + "supported_subarches": image_resource["subarches"], + }, + { + "osystem": osystem, + "architecture": arch, + "subarchitecture": subarch, + "release": release, + "label": label, + "purpose": purpose2, + "xinstall_path": '', + "xinstall_type": '', + "supported_subarches": image_resource["subarches"], + }, + { + "osystem": osystem, + "architecture": arch, + "subarchitecture": subarch, + "release": release, + "label": label, + "purpose": xi_purpose, + "xinstall_path": xi_path, + "xinstall_type": xi_type, + "supported_subarches": image_resource["subarches"], + }, + ], + params) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_uefi.py maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_uefi.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_uefi.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_uefi.py 2015-07-10 01:27:14.000000000 +0000 @@ -15,15 +15,22 @@ __all__ = [] import re +from urlparse import urlparse from maastesting.factory import factory -from maastesting.testcase import MAASTestCase +from maastesting.testcase import ( + MAASTestCase, + MAASTwistedRunTest, + ) from provisioningserver.boot import BytesReader from provisioningserver.boot.tftppath import compose_image_path from provisioningserver.boot.uefi import ( + get_main_archive_url, re_config_file, UEFIBootMethod, ) +from provisioningserver.rpc import region +from 
provisioningserver.rpc.testing import MockLiveClusterToRegionRPCFixture from provisioningserver.tests.test_kernel_opts import make_kernel_parameters from testtools.matchers import ( IsInstance, @@ -31,6 +38,10 @@ MatchesRegex, StartsWith, ) +from twisted.internet.defer import ( + inlineCallbacks, + succeed, + ) def compose_config_path(mac=None, arch=None, subarch=None): @@ -58,8 +69,32 @@ return "grub/grub.cfg" -class TestRenderUEFIConfig(MAASTestCase): - """Tests for `provisioningserver.boot.uefi.UEFIBootMethod`.""" +class TestGetMainArchiveUrl(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def patch_rpc_methods(self, return_value=None): + fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) + protocol, connecting = fixture.makeEventLoop(region.GetArchiveMirrors) + protocol.GetArchiveMirrors.return_value = return_value + return protocol, connecting + + @inlineCallbacks + def test_get_main_archive_url(self): + mirrors = { + 'main': urlparse(factory.make_url('ports')), + 'ports': urlparse(factory.make_url('ports')), + } + return_value = succeed(mirrors) + protocol, connecting = self.patch_rpc_methods(return_value) + self.addCleanup((yield connecting)) + value = yield get_main_archive_url() + expected_url = mirrors['main'].geturl() + self.assertEqual(expected_url, value) + + +class TestUEFIBootMethodRender(MAASTestCase): + """Tests for `provisioningserver.boot.uefi.UEFIBootMethod.render`.""" def test_get_reader(self): # Given the right configuration options, the UEFI configuration is @@ -75,7 +110,7 @@ self.assertThat(output, StartsWith("set default=\"0\"")) # The UEFI parameters are all set according to the options. 
image_dir = compose_image_path( - arch=params.arch, subarch=params.subarch, + osystem=params.osystem, arch=params.arch, subarch=params.subarch, release=params.release, label=params.label) self.assertThat( @@ -128,7 +163,7 @@ The path is intended to match `re_config_file`, and the components are the expected groups from a match. """ - components = {"mac": factory.getRandomMACAddress(":"), + components = {"mac": factory.make_mac_address(":"), "arch": None, "subarch": None} config_path = compose_config_path(components["mac"]) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_utils.py maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_utils.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_utils.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_utils.py 2015-07-10 01:27:14.000000000 +0000 @@ -30,8 +30,8 @@ def test_get_packages(self): archive = factory.make_name("archive") comp, arch, release = factory.make_names("comp", "arch", "release") - release_gpg = factory.getRandomString() - packages_gz = factory.getRandomString() + release_gpg = factory.make_string() + packages_gz = factory.make_string() url = utils.urljoin(archive, 'dists', release) release_url = utils.urljoin(url, 'Release') @@ -64,8 +64,8 @@ def test_get_packages_errors_on_invalid_checksum(self): archive = factory.make_name("archive") comp, arch, release = factory.make_names("comp", "arch", "release") - release_gpg = factory.getRandomString() - packages_gz = factory.getRandomString() + release_gpg = factory.make_string() + packages_gz = factory.make_string() packages_path = '%s/binary-%s/Packages.gz' % (comp, arch) packages_gz_md5 = utils.get_md5sum(packages_gz + '0') @@ -109,7 +109,7 @@ archive = factory.make_name("archive") comp, arch, release = factory.make_names("comp", "arch", "release") - package_data = factory.getRandomString() + package_data = factory.make_string() package_md5 = utils.get_md5sum(package_data) 
package_info = { 'Package': package, @@ -137,7 +137,7 @@ archive = factory.make_name("archive") comp, arch, release = factory.make_names("comp", "arch", "release") - package_data = factory.getRandomString() + package_data = factory.make_string() package_md5 = utils.get_md5sum(package_data + '0') package_info = { 'Package': package, diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_windows.py maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_windows.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/tests/test_windows.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/tests/test_windows.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,400 @@ +# Copyright 2014 Cloudbase Solutions SRL. +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `provisioningserver.boot.windows`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import logging +import os +import shutil + +from fixtures import FakeLogger +from maastesting.factory import factory +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import ( + MAASTestCase, + MAASTwistedRunTest, + ) +import mock +from mock import sentinel +from provisioningserver.boot import ( + BootMethodError, + BytesReader, + windows as windows_module, + ) +from provisioningserver.boot.windows import ( + Bcd, + WindowsPXEBootMethod, + ) +from provisioningserver.config import Config +from provisioningserver.rpc.exceptions import NoSuchNode +from provisioningserver.rpc.region import RequestNodeInfoByMACAddress +from provisioningserver.rpc.testing import ( + always_fail_with, + always_succeed_with, + ) +from provisioningserver.tests.test_kernel_opts import make_kernel_parameters +from testtools.deferredruntest import extract_result +from testtools.matchers 
import Is +from tftp.backend import FilesystemReader +from twisted.internet.defer import inlineCallbacks + + +class TestBcd(MAASTestCase): + + def configure_hivex(self): + mock_hivex = mock.MagicMock() + self.patch(windows_module, 'load_hivex').return_value = mock_hivex + mock_hivex.node_name.side_effect = ['Objects', + Bcd.GUID_WINDOWS_BOOTMGR, + Bcd.BOOT_MGR_DISPLAY_ORDER] + mock_hivex.node_children.side_effect = [ + [factory.make_name('objects')], [factory.make_name('object')], + ['value0', factory.make_UUID()], + [factory.make_name('element')]] + mock_hivex.node_values.return_value = [factory.make_name('val')] + + def configure_bcd(self, uids=None): + self.configure_hivex() + filename = factory.make_name('filename') + bcd = Bcd(filename) + bcd.uids = mock.MagicMock(spec=dict) + if uids is None: + uids = [factory.make_name('uid'), + factory.make_name('uid')] + bcd.uids.__getitem__.return_value = uids + bcd.hive = mock.MagicMock() + return bcd + + def test_get_loader(self): + bcd = self.configure_bcd() + + mock_elem = factory.make_name('elem') + bootmgr_elems = mock.MagicMock(spec=dict) + bootmgr_elems.__getitem__.return_value = mock_elem + + mock_node_value = factory.make_name('node_value') + bcd.hive.node_values.return_value = [mock_node_value] + mock_string = factory.make_name('strings') + bcd.hive.value_multiple_strings.return_value = [mock_string] + + response = bcd._get_loader(bootmgr_elems) + self.assertThat(bcd.hive.node_values, MockCalledOnceWith(mock_elem)) + self.assertThat( + bcd.hive.value_multiple_strings, + MockCalledOnceWith(mock_node_value)) + self.assertEqual(mock_string, response) + + def test_get_loader_elems(self): + mock_uid_0 = factory.make_name('uid') + mock_uid_1 = factory.make_name('uid') + bcd = self.configure_bcd(uids=[mock_uid_0, mock_uid_1]) + + mock_child = factory.make_name('child') + bcd.hive.node_children.side_effect = [[mock_child]] + mock_name = factory.make_name('name') + bcd.hive.node_name.return_value = mock_name + + 
response = bcd._get_loader_elems() + self.assertThat(bcd.hive.node_children, MockCalledOnceWith(mock_uid_1)) + self.assertThat(bcd.hive.node_name, MockCalledOnceWith(mock_child)) + self.assertEqual(response, {mock_name: mock_child}) + + def test_get_load_options_key(self): + bcd = self.configure_bcd() + + fake_load_elem = factory.make_name('load_elem') + mock_load_elem = mock.MagicMock() + mock_load_elem.get.return_value = fake_load_elem + + mock_get_loader_elems = self.patch(Bcd, '_get_loader_elems') + mock_get_loader_elems.return_value = mock_load_elem + + response = bcd._get_load_options_key() + self.assertThat( + mock_get_loader_elems, MockCalledOnceWith()) + self.assertThat( + mock_load_elem.get, MockCalledOnceWith(bcd.LOAD_OPTIONS, None)) + self.assertEqual(response, fake_load_elem) + + def test_set_load_options(self): + mock_uid_0 = factory.make_name('uid') + mock_uid_1 = factory.make_name('uid') + bcd = self.configure_bcd(uids=[mock_uid_0, mock_uid_1]) + + fake_value = factory.make_name('value') + mock_get_load_options_key = self.patch(Bcd, '_get_load_options_key') + mock_get_load_options_key.return_value = None + + fake_child = factory.make_name('child') + bcd.hive.node_add_child.return_value = fake_child + bcd.set_load_options(value=fake_value) + + compare = {'t': 1, + 'key': "Element", + 'value': fake_value.decode('utf-8').encode('utf-16le'), + } + self.assertThat( + mock_get_load_options_key, MockCalledOnceWith()) + self.assertThat( + bcd.hive.node_add_child, + MockCalledOnceWith(mock_uid_1, bcd.LOAD_OPTIONS)) + self.assertThat( + bcd.hive.node_set_value, + MockCalledOnceWith(fake_child, compare)) + self.assertThat(bcd.hive.commit, MockCalledOnceWith(None)) + + +class TestRequestNodeInfoByMACAddress(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test__returns_None_when_MAC_is_None(self): + logger = self.useFixture(FakeLogger("maas", logging.DEBUG)) + d = windows_module.request_node_info_by_mac_address(None) + 
self.assertThat(extract_result(d), Is(None)) + self.assertDocTestMatches( + "Cannot determine node; MAC address is unknown.", + logger.output) + + def test__returns_None_when_node_not_found(self): + logger = self.useFixture(FakeLogger("maas", logging.DEBUG)) + client = self.patch(windows_module, "getRegionClient").return_value + client.side_effect = always_fail_with(NoSuchNode()) + mac = factory.make_mac_address() + d = windows_module.request_node_info_by_mac_address(mac) + self.assertThat(extract_result(d), Is(None)) + self.assertDocTestMatches( + "Node doesn't exist for MAC address: %s" % mac, + logger.output) + + def test__returns_output_from_RequestNodeInfoByMACAddress(self): + client = self.patch(windows_module, "getRegionClient").return_value + client.side_effect = always_succeed_with(sentinel.node_info) + d = windows_module.request_node_info_by_mac_address(sentinel.mac) + self.assertThat(extract_result(d), Is(sentinel.node_info)) + self.assertThat(client, MockCalledOnceWith( + RequestNodeInfoByMACAddress, mac_address=sentinel.mac)) + + +class TestWindowsPXEBootMethod(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def setUp(self): + self.patch(Config, 'load_from_cache') + self.patch(windows_module, 'get_hivex_module') + super(TestWindowsPXEBootMethod, self).setUp() + + def test_clean_path(self): + method = WindowsPXEBootMethod() + parts = [factory.make_string() for _ in range(3)] + dirty_path = '\\'.join(parts) + valid_path = dirty_path.lower().replace('\\', '/') + clean_path = method.clean_path(dirty_path) + self.assertEqual(valid_path, clean_path) + + def test_clean_path_strip_boot(self): + method = WindowsPXEBootMethod() + dirty_path = '\\Boot\\BCD' + clean_path = method.clean_path(dirty_path) + self.assertEqual('bcd', clean_path) + + def test_get_node_info(self): + method = WindowsPXEBootMethod() + mac = factory.make_mac_address() + self.patch(windows_module, 'get_remote_mac').return_value = mac + mock_request_node_info 
= self.patch( + windows_module, 'request_node_info_by_mac_address') + method.get_node_info() + self.assertThat( + mock_request_node_info, MockCalledOnceWith(mac)) + + @inlineCallbacks + def test_match_path_pxelinux(self): + method = WindowsPXEBootMethod() + method.remote_path = factory.make_string() + mock_mac = factory.make_mac_address() + mock_get_node_info = self.patch(method, 'get_node_info') + mock_get_node_info.return_value = { + 'purpose': 'install', + 'osystem': 'windows', + 'mac': mock_mac, + } + + params = yield method.match_path(None, 'pxelinux.0') + self.assertEqual(mock_mac, params['mac']) + self.assertEqual(method.bootloader_path, params['path']) + + @inlineCallbacks + def test_match_path_pxelinux_only_on_install(self): + method = WindowsPXEBootMethod() + method.remote_path = factory.make_string() + mock_mac = factory.make_mac_address() + mock_get_node_info = self.patch(method, 'get_node_info') + mock_get_node_info.return_value = { + 'purpose': factory.make_string(), + 'osystem': 'windows', + 'mac': mock_mac, + } + + params = yield method.match_path(None, 'pxelinux.0') + self.assertEqual(params, None) + + @inlineCallbacks + def test_match_path_pxelinux_missing_hivex(self): + method = WindowsPXEBootMethod() + method.remote_path = factory.make_string() + mock_mac = factory.make_mac_address() + mock_get_node_info = self.patch(method, 'get_node_info') + mock_get_node_info.return_value = { + 'purpose': factory.make_string(), + 'osystem': 'windows', + 'mac': mock_mac, + } + + self.patch(windows_module, 'HAVE_HIVEX', ) + params = yield method.match_path(None, 'pxelinux.0') + self.assertEqual(params, None) + + @inlineCallbacks + def test_match_path_pxelinux_only_on_windows(self): + method = WindowsPXEBootMethod() + method.remote_path = factory.make_string() + mock_mac = factory.make_mac_address() + mock_get_node_info = self.patch(method, 'get_node_info') + mock_get_node_info.return_value = { + 'purpose': 'install', + 'osystem': factory.make_string(), + 'mac': 
mock_mac, + } + + params = yield method.match_path(None, 'pxelinux.0') + self.assertEqual(params, None) + + @inlineCallbacks + def test_match_path_pxelinux_get_node_info_None(self): + method = WindowsPXEBootMethod() + method.remote_path = factory.make_string() + mock_get_node_info = self.patch(method, 'get_node_info') + mock_get_node_info.return_value = None + + params = yield method.match_path(None, 'pxelinux.0') + self.assertEqual(params, None) + + @inlineCallbacks + def test_match_path_static_file(self): + method = WindowsPXEBootMethod() + mock_mac = factory.make_mac_address() + mock_get_node_info = self.patch(windows_module, 'get_remote_mac') + mock_get_node_info.return_value = mock_mac + + params = yield method.match_path(None, 'bootmgr.exe') + self.assertEqual(mock_mac, params['mac']) + self.assertEqual('bootmgr.exe', params['path']) + + @inlineCallbacks + def test_match_path_static_file_clean_path(self): + method = WindowsPXEBootMethod() + mock_mac = factory.make_mac_address() + mock_get_node_info = self.patch(windows_module, 'get_remote_mac') + mock_get_node_info.return_value = mock_mac + + params = yield method.match_path(None, '\\Boot\\BCD') + self.assertEqual(mock_mac, params['mac']) + self.assertEqual('bcd', params['path']) + + def test_get_reader_bcd(self): + method = WindowsPXEBootMethod() + mock_compose_bcd = self.patch(method, 'compose_bcd') + local_host = factory.make_ipv4_address() + kernel_params = make_kernel_parameters(osystem='windows') + + method.get_reader( + None, kernel_params, path='bcd', local_host=local_host) + self.assertThat( + mock_compose_bcd, MockCalledOnceWith(kernel_params, local_host)) + + def test_get_reader_static_file(self): + method = WindowsPXEBootMethod() + mock_path = factory.make_name('path') + mock_output_static = self.patch(method, 'output_static') + kernel_params = make_kernel_parameters(osystem='windows') + + method.get_reader(None, kernel_params, path=mock_path) + self.assertThat( + mock_output_static, + 
MockCalledOnceWith(kernel_params, mock_path)) + + def test_compose_preseed_url(self): + url = 'http://localhost/MAAS' + expected = 'http:\\\\localhost\\^M^A^A^S' + method = WindowsPXEBootMethod() + output = method.compose_preseed_url(url) + self.assertEqual(expected, output) + + def test_compose_bcd(self): + method = WindowsPXEBootMethod() + local_host = factory.make_ipv4_address() + kernel_params = make_kernel_parameters() + + fake_output = factory.make_string().encode('utf-8') + self.patch(os.path, 'isfile').return_value = True + self.patch(shutil, 'copyfile') + self.patch(windows_module, 'Bcd') + + with mock.patch( + 'provisioningserver.boot.windows.open', + mock.mock_open(read_data=fake_output), create=True): + output = method.compose_bcd(kernel_params, local_host) + + self.assertTrue(isinstance(output, BytesReader)) + self.assertEqual(fake_output, output.read(-1)) + + def test_compose_bcd_missing_template(self): + method = WindowsPXEBootMethod() + self.patch(method, 'get_resource_path').return_value = '' + local_host = factory.make_ipv4_address() + kernel_params = make_kernel_parameters() + + self.assertRaises( + BootMethodError, method.compose_bcd, kernel_params, local_host) + + def test_get_resouce_path(self): + fake_tftproot = factory.make_name('tftproot') + mock_config = self.patch(windows_module, 'Config') + mock_config.load_from_cache.return_value = { + 'tftp': { + 'resource_root': fake_tftproot, + }, + } + method = WindowsPXEBootMethod() + fake_path = factory.make_name('path') + fake_kernelparams = make_kernel_parameters() + result = method.get_resource_path(fake_kernelparams, fake_path) + expected = os.path.join( + fake_tftproot, 'windows', fake_kernelparams.arch, + fake_kernelparams.subarch, fake_kernelparams.release, + fake_kernelparams.label, fake_path) + self.assertEqual(expected, result) + + def test_output_static(self): + method = WindowsPXEBootMethod() + contents = factory.make_string() + temp_dir = self.make_dir() + filename = 
factory.make_file(temp_dir, "resource", contents=contents) + self.patch(method, 'get_resource_path').return_value = filename + result = method.output_static(None, None) + self.assertIsInstance(result, FilesystemReader) + self.assertEqual(contents, result.read(10000)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/tftppath.py maas-1.7.6+bzr3376/src/provisioningserver/boot/tftppath.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/tftppath.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/tftppath.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,7 +14,6 @@ __metaclass__ = type __all__ = [ 'compose_image_path', - 'drill_down', 'list_boot_images', 'list_subdirs', 'locate_tftp_path', @@ -22,19 +21,30 @@ import errno from itertools import chain -from logging import getLogger import os.path +from provisioningserver import config +from provisioningserver.drivers.osystem import ( + BOOT_IMAGE_PURPOSE, + OperatingSystemRegistry, + ) +from provisioningserver.import_images.boot_image_mapping import ( + BootImageMapping, + ) +from provisioningserver.import_images.helpers import ImageSpec +from provisioningserver.logger import get_maas_logger -logger = getLogger(__name__) +maaslog = get_maas_logger("tftp") -def compose_image_path(arch, subarch, release, label): + +def compose_image_path(osystem, arch, subarch, release, label): """Compose the TFTP path for a PXE kernel/initrd directory. The path returned is relative to the TFTP root, as it would be identified by clients on the network. + :param osystem: Operating system. :param arch: Main machine architecture. :param subarch: Sub-architecture, or "generic" if there is none. :param release: Operating system release, e.g. "precise". @@ -43,7 +53,7 @@ kernel and initrd) as exposed over TFTP. """ # This is a TFTP path, not a local filesystem path, so hard-code the slash. 
- return '/'.join([arch, subarch, release, label]) + return '/'.join([osystem, arch, subarch, release, label]) def locate_tftp_path(path, tftproot): @@ -110,25 +120,103 @@ extend_path(directory, path) for path in paths)) -def extract_image_params(path): +def extract_metadata(metadata, params): + """Examine the maas.meta file for any required metadata. + + :param metadata: contents of the maas.meta file + :param params: A dict of path components for the image + (architecture, subarchitecture, release and label). + :return: a dict of name/value metadata pairs. Currently, only + "subarches" is extracted. + """ + mapping = BootImageMapping.load_json(metadata) + + image = ImageSpec( + os=params["osystem"], + arch=params["architecture"], + subarch=params["subarchitecture"], + release=params["release"], + label=params["label"], + ) + try: + # On upgrade from 1.5 to 1.6, the subarches does not exist in the + # maas.meta file . Without this catch boot images will fail to + # report until the boot images are imported again. + subarches = mapping.mapping[image]['subarches'] + except KeyError: + return {} + + return dict(supported_subarches=subarches) + + +def extract_image_params(path, maas_meta): """Represent a list of TFTP path elements as a list of boot-image dicts. - The path must consist of a full [architecture, subarchitecture, release] - that identify a kind of boot that we may need an image for. + :param path: Tuple or list that consists of a full [osystem, architecture, + subarchitecture, release] that identify a kind of boot for which we + may need an image. + :param maas_meta: Contents of the maas.meta file. This may be an + empty string. + + :return: A list of dicts, each of which may also include additional + items of meta-data that are not elements in the path, such as + "subarches". 
""" - arch, subarch, release, label = path - # XXX: rvb 2014-03-24: The images import script currently imports all the - # images for the configured selections (where a selection is an - # arch/subarch/series/label combination). When the import script grows the - # ability to import the images for a particular purpose, we need to change - # this code to report what is actually present. - purposes = ['commissioning', 'install', 'xinstall'] - return [ - dict( - architecture=arch, subarchitecture=subarch, + osystem, arch, subarch, release, label = path + osystem_obj = OperatingSystemRegistry.get_item(osystem, default=None) + if osystem_obj is None: + return [] + + purposes = osystem_obj.get_boot_image_purposes( + arch, subarch, release, label) + + # Expand the path into a list of dicts, one for each boot purpose. + params = [] + for purpose in purposes: + image = dict( + osystem=osystem, architecture=arch, subarchitecture=subarch, release=release, label=label, purpose=purpose) - for purpose in purposes - ] + if purpose == BOOT_IMAGE_PURPOSE.XINSTALL: + xinstall_path, xinstall_type = osystem_obj.get_xinstall_parameters( + arch, subarch, release, label) + image['xinstall_path'] = xinstall_path + image['xinstall_type'] = xinstall_type + else: + image['xinstall_path'] = '' + image['xinstall_type'] = '' + params.append(image) + + # Merge in the meta-data. + for image_dict in params: + metadata = extract_metadata(maas_meta, image_dict) + image_dict.update(metadata) + + return params + + +def maas_meta_file_path(tftproot): + """Return a string containing the full path to maas.meta.""" + return os.path.join(tftproot, 'maas.meta') + + +def maas_meta_last_modified(tftproot=None): + """Return time of last modification of maas.meta. + + The time is the same as returned from getmtime() (seconds since epoch), + or None if the file doesn't exist. 
+ + :param tftproot: Optional tftp root dir, defaults to + provisioningserver.config.BOOT_RESOURCES_STORAGE + """ + if tftproot is None: + tftproot = os.path.join(config.BOOT_RESOURCES_STORAGE, 'current') + meta_file = maas_meta_file_path(tftproot) + try: + return os.path.getmtime(meta_file) + except OSError as e: + if e.errno == errno.ENOENT: + return None + raise def list_boot_images(tftproot): @@ -139,30 +227,44 @@ `report_boot_images` API call. """ # The sub-directories directly under tftproot, if they contain - # images, represent architectures. + # images, represent operating systems. try: - potential_archs = list_subdirs(tftproot) + potential_osystems = list_subdirs(tftproot) except OSError as exception: if exception.errno == errno.ENOENT: # Directory does not exist, so return empty list. - logger.warning("No boot images have been imported yet.") + maaslog.warning( + "No boot images have been imported from the region.") return [] - else: - # Other error. Propagate. - raise + + # Other error. Propagate. + raise # Starting point for iteration: paths that contain only the # top-level subdirectory of tftproot, i.e. the architecture name. - paths = [[subdir] for subdir in potential_archs] + paths = [[subdir] for subdir in potential_osystems] # Extend paths deeper into the filesystem, through the levels that - # represent sub-architecture, release, and label. Any directory - # that doesn't extend this deep isn't a boot image. - for level in ['subarch', 'release', 'label']: + # represent architecture, sub-architecture, release, and label. + # Any directory that doesn't extend this deep isn't a boot image. + for level in ['arch', 'subarch', 'release', 'label']: paths = drill_down(tftproot, paths) + # Get hold of image meta-data stored in the maas.meta file. + meta_file_path = maas_meta_file_path(tftproot) + try: + with open(meta_file_path, "rb") as f: + metadata = f.read() + except IOError as e: + if e.errno != errno.ENOENT: + # Unexpected error, propagate. 
+ raise + # No meta file (yet), it means no import has run so just skip + # it. + metadata = "" + # Each path we find this way should be a boot image. # This gets serialised to JSON, so we really have to return a list, not # just any iterable. return list(chain.from_iterable( - extract_image_params(path) for path in paths)) + extract_image_params(path, metadata) for path in paths)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/uefi.py maas-1.7.6+bzr3376/src/provisioningserver/boot/uefi.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/uefi.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/uefi.py 2015-07-10 01:27:14.000000000 +0000 @@ -21,6 +21,7 @@ import re from textwrap import dedent import urllib2 +from urlparse import urljoin from provisioningserver.boot import ( BootMethod, @@ -33,13 +34,17 @@ install_bootloader, make_destination, ) -from provisioningserver.utils import ( - call_and_check, - tempdir, +from provisioningserver.rpc import getRegionClient +from provisioningserver.rpc.region import GetArchiveMirrors +from provisioningserver.utils.fs import tempdir +from provisioningserver.utils.shell import call_and_check +from provisioningserver.utils.twisted import asynchronous +from twisted.internet.defer import ( + inlineCallbacks, + returnValue, ) -ARCHIVE_URL = "http://archive.ubuntu.com/ubuntu/dists/" ARCHIVE_PATH = "/main/uefi/grub2-amd64/current/grubnetx64.efi.signed" CONFIG_FILE = dedent(""" @@ -83,7 +88,21 @@ re_config_file = re.compile(re_config_file, re.VERBOSE) -def archive_grubnet_urls(): +@asynchronous +def get_archive_mirrors(): + client = getRegionClient() + return client(GetArchiveMirrors) + + +@asynchronous(timeout=10) +@inlineCallbacks +def get_main_archive_url(): + mirrors = yield get_archive_mirrors() + main_url = mirrors['main'].geturl() + returnValue(main_url) + + +def archive_grubnet_urls(main_url): """Paths to try to download grubnetx64.efi.signed.""" release = 
utils.get_distro_release() # grubnetx64 will not work below version trusty, as efinet is broken @@ -91,16 +110,19 @@ # this should not block any of the previous release from running. if release in ['lucid', 'precise', 'quantal', 'saucy']: release = 'trusty' + if not main_url.endswith('/'): + main_url = main_url + '/' + dists_url = urljoin(main_url, 'dists') for dist in ['%s-updates' % release, release]: yield "%s/%s/%s" % ( - ARCHIVE_URL.rstrip("/"), + dists_url.rstrip("/"), dist, ARCHIVE_PATH.rstrip("/")) -def download_grubnet(destination): +def download_grubnet(main_url, destination): """Downloads grubnetx64.efi.signed from the archive.""" - for url in archive_grubnet_urls(): + for url in archive_grubnet_urls(main_url): try: response = urllib2.urlopen(url) # Okay, if it fails as the updates area might not hold @@ -118,6 +140,7 @@ name = "uefi" template_subdir = "uefi" + bootloader_arches = ['amd64'] bootloader_path = "bootx64.efi" arch_octet = "00:07" # AMD64 EFI @@ -127,7 +150,7 @@ :param backend: requesting backend :param path: requested path - :returns: dict of match params from path, None if no match + :return: dict of match params from path, None if no match """ match = re_config_file.match(path) if match is None: @@ -160,10 +183,11 @@ """Installs the required files for UEFI booting into the tftproot. 
""" + archive_url = get_main_archive_url() with tempdir() as tmp: # Download the shim-signed package data, filename = utils.get_updates_package( - 'shim-signed', 'http://archive.ubuntu.com/ubuntu', + 'shim-signed', archive_url, 'main', 'amd64') if data is None: raise BootMethodInstallError( @@ -181,7 +205,7 @@ # Download grubnetx64 from the archive and install grub_tmp = os.path.join(tmp, 'grubnetx64.efi.signed') - if download_grubnet(grub_tmp) is False: + if download_grubnet(archive_url, grub_tmp) is False: raise BootMethodInstallError( 'Failed to download grubnetx64.efi.signed ' 'from the archive.') diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/utils.py maas-1.7.6+bzr3376/src/provisioningserver/boot/utils.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/utils.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/utils.py 2015-07-10 01:27:14.000000000 +0000 @@ -24,13 +24,10 @@ from platform import linux_distribution import re import StringIO -import subprocess import urllib2 -from provisioningserver.utils import ( - call_capture_and_check, - tempdir, - ) +from provisioningserver.utils.fs import tempdir +from provisioningserver.utils.shell import call_and_check def urljoin(*args): @@ -47,9 +44,12 @@ """Downloads the file from the URL. :param url: URL to download file - :returns: File data, or None + :return: File data, or None """ - response = urllib2.urlopen(url) + # Build a new opener so that the environment is checked for proxy + # URLs. Using urllib2.urlopen() means that we'd only be using the + # proxies as defined when urlopen() was called the first time. 
+ response = urllib2.build_opener().open(url) return response.read() @@ -71,14 +71,13 @@ with open(data_out, 'wb') as stream: stream.write(data_file) - args = [ + call_and_check([ "gpgv", "--keyring", "/etc/apt/trusted.gpg", sig_out, data_out - ] - call_capture_and_check(args, stderr=subprocess.STDOUT) + ]) def decompress_packages(packages): diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot/windows.py maas-1.7.6+bzr3376/src/provisioningserver/boot/windows.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot/windows.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot/windows.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,338 @@ +# Copyright 2014 Cloudbase Solutions SRL. +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Windows PXE Boot Method""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'WindowsPXEBootMethod', + ] + +import os.path +import re +import shutil +import sys + +from provisioningserver.boot import ( + BootMethod, + BootMethodError, + BytesReader, + get_remote_mac, + ) +from provisioningserver.config import Config +from provisioningserver.logger.log import get_maas_logger +from provisioningserver.rpc import getRegionClient +from provisioningserver.rpc.exceptions import NoSuchNode +from provisioningserver.rpc.region import RequestNodeInfoByMACAddress +from provisioningserver.utils.fs import tempdir +from provisioningserver.utils.twisted import ( + asynchronous, + deferred, + ) +from tftp.backend import FilesystemReader +from twisted.internet.defer import ( + inlineCallbacks, + returnValue, + succeed, + ) +from twisted.python.context import get +from twisted.python.filepath import FilePath + + +maaslog = get_maas_logger("windows") + + +# These files do not exist in the tftproot. 
WindowsPXEBootMethod +# handles access to these files returning the correct version +# of the file for the booting version of Windows. +# +# Note: Each version of Windows can have different content for +# these files. +STATIC_FILES = [ + 'pxeboot.0', + 'bootmgr.exe', + '\\boot\\bcd', + '\\boot\\winpe.wim', + '\\boot\\boot.sdi', + '\\boot\\font\\wgl4_boot.ttf', + ] + + +def get_hivex_module(): + """Returns the hivex module if avaliable. + + python-hivex is an optional dependency, but it is needed + before MAAS can boot Windows. + """ + if 'hivex' not in sys.modules: + try: + __import__('hivex') + except ImportError: + return None + return sys.modules['hivex'] + + +def load_hivex(*args, **kwargs): + """Returns the Hivex object.""" + module = get_hivex_module() + if module is None: + return None + return module.Hivex(*args, **kwargs) + + +@asynchronous +def request_node_info_by_mac_address(mac_address): + """Request node info for the given mac address. + + :param mac_address: The MAC Address of the node of the event. + :type mac_address: unicode + """ + if mac_address is None: + maaslog.debug("Cannot determine node; MAC address is unknown.") + return succeed(None) + + client = getRegionClient() + d = client(RequestNodeInfoByMACAddress, mac_address=mac_address) + + def eb_request_node_info(failure): + failure.trap(NoSuchNode) + maaslog.debug("Node doesn't exist for MAC address: %s", mac_address) + return None + + return d.addErrback(eb_request_node_info) + + +class Bcd: + """Allows modification of the load options in a Windows boot + configuration data file. 
+ + References: + http://msdn.microsoft.com/en-us/library/windows/desktop/ + - aa362652(v=vs.85).aspx + - aa362641(v=vs.85).aspx + """ + + GUID_WINDOWS_BOOTMGR = '{9dea862c-5cdd-4e70-acc1-f32b344d4795}' + BOOT_MGR_DISPLAY_ORDER = '24000001' + LOAD_OPTIONS = '12000030' + + def __init__(self, filename): + self.hive = load_hivex(filename, write=True) + + # uids + objects = self._get_root_objects() + self.uids = {} + for i in self.hive.node_children(objects): + self.uids[self.hive.node_name(i)] = self.hive.node_children(i) + + # default bootloader + mgr = self.uids[self.GUID_WINDOWS_BOOTMGR][1] + bootmgr_elems = dict([(self.hive.node_name(i), i) for i in + self.hive.node_children(mgr)]) + self.loader = self._get_loader(bootmgr_elems) + + def _get_root_elements(self): + """Gets the root from the hive.""" + root = self.hive.root() + r_elems = {} + for i in self.hive.node_children(root): + name = self.hive.node_name(i) + r_elems[name] = i + return r_elems + + def _get_root_objects(self): + """Gets the root objects.""" + elems = self._get_root_elements() + return elems['Objects'] + + def _get_loader(self, bootmgr_elems): + """Get default bootloader.""" + (val,) = self.hive.node_values( + bootmgr_elems[self.BOOT_MGR_DISPLAY_ORDER]) + loader = self.hive.value_multiple_strings(val)[0] + return loader + + def _get_loader_elems(self): + """Get elements present in default boot loader. We need this + in order to determine the loadoptions key. 
+ """ + return dict( + [(self.hive.node_name(i), i) + for i in self.hive.node_children(self.uids[self.loader][1])]) + + def _get_load_options_key(self): + """Gets the key containing the load options we want to edit.""" + load_elem = self._get_loader_elems() + load_option_key = load_elem.get(self.LOAD_OPTIONS, None) + return load_option_key + + def set_load_options(self, value): + """Sets the loadoptions value to param:value.""" + h = self._get_load_options_key() + if h is None: + # No load options key in the hive, add the key + # so the value can be set. + h = self.hive.node_add_child( + self.uids[self.loader][1], self.LOAD_OPTIONS) + k_type = 1 + key = "Element" + data = { + 't': k_type, + 'key': key, + # Windows only accepts utf-16le in load options. + 'value': value.decode('utf-8').encode('utf-16le'), + } + self.hive.node_set_value(h, data) + self.hive.commit(None) + + +class WindowsPXEBootMethod(BootMethod): + + name = "windows" + template_subdir = "windows" + bootloader_path = "pxeboot.0" + arch_octet = None + + @deferred + def get_node_info(self): + """Gets node information via the remote mac.""" + remote_mac = get_remote_mac() + return request_node_info_by_mac_address(remote_mac) + + def clean_path(self, path): + """Converts Windows path into a unix path and strips the + boot subdirectory from the paths. + """ + path = path.lower().replace('\\', '/') + if path[0:6] == "/boot/": + path = path[6:] + return path + + @inlineCallbacks + def match_path(self, backend, path): + """Checks path to see if the boot method should handle + the requested file. + + :param backend: requesting backend + :param path: requested path + :return: dict of match params from path, None if no match + """ + # If the node is requesting the initial bootloader, then we + # need to see if this node is set to boot Windows first. 
+ local_host, local_port = get("local", (None, None)) + if path == 'pxelinux.0': + data = yield self.get_node_info() + if data is None: + returnValue(None) + + # Only provide the Windows bootloader when installing + # PXELINUX chainloading will work for the rest of the time. + purpose = data.get('purpose') + if purpose != 'install': + returnValue(None) + + osystem = data.get('osystem') + if osystem == 'windows': + # python-hivex is needed to continue. + if get_hivex_module() is None: + raise BootMethodError('python-hivex package is missing.') + + returnValue({ + 'mac': data.get('mac'), + 'path': self.bootloader_path, + 'local_host': local_host, + }) + # Fix the paths for the other static files, Windows requests. + elif path.lower() in STATIC_FILES: + returnValue({ + 'mac': get_remote_mac(), + 'path': self.clean_path(path), + 'local_host': local_host, + }) + returnValue(None) + + def get_reader(self, backend, kernel_params, **extra): + """Render a configuration file as a unicode string. + + :param backend: requesting backend + :param kernel_params: An instance of `KernelParameters`. + :param extra: Allow for other arguments. This is a safety valve; + parameters generated in another component (for example, see + `TFTPBackend.get_boot_method_reader`) won't cause this to break. + """ + path = extra['path'] + if path == 'bcd': + local_host = extra['local_host'] + return self.compose_bcd(kernel_params, local_host) + return self.output_static(kernel_params, path) + + def install_bootloader(self, destination): + """Installs the required files for Windows booting into the + tftproot. + + Does nothing. Windows requires manual installation of bootloader + files, due to licensing. + """ + + def compose_preseed_url(self, url): + """Modifies the url to replace all forward slashes with + backslashes, and prepends the ^ character to any upper-case + characters. 
+ + Boot load options of Windows will all be upper-cased + as Windows does not care about case, and what gets exposed in the + registry is all uppercase. MAAS requires a case-sensitive url. + + The Windows install script extracts the preseed url and any character + that starts with ^ is then uppercased, so that the URL is correct. + """ + url = url.replace('/', '\\') + return re.sub(r"([A-Z])", r"^\1", url) + + def get_resource_path(self, kernel_params, path): + """Gets the resource path from the kernel param.""" + resources = Config.load_from_cache()['tftp']['resource_root'] + return os.path.join( + resources, 'windows', kernel_params.arch, kernel_params.subarch, + kernel_params.release, kernel_params.label, path) + + def compose_bcd(self, kernel_params, local_host): + """Composes the Windows boot configuration data. + + :param kernel_params: An instance of `KernelParameters`. + :return: Binary data + """ + preseed_url = self.compose_preseed_url(kernel_params.preseed_url) + release_path = "%s\\source" % kernel_params.release + remote_path = "\\\\%s\\reminst" % local_host + loadoptions = "%s;%s;%s" % \ + (remote_path, release_path, preseed_url) + + # Generate the bcd file. 
+ bcd_template = self.get_resource_path(kernel_params, "bcd") + if not os.path.isfile(bcd_template): + raise BootMethodError( + "Failed to find bcd template: %s" % bcd_template) + with tempdir() as tmp: + bcd_tmp = os.path.join(tmp, "bcd") + shutil.copyfile(bcd_template, bcd_tmp) + + bcd = Bcd(bcd_tmp) + bcd.set_load_options(loadoptions) + + with open(bcd_tmp, 'rb') as stream: + return BytesReader(stream.read()) + + def output_static(self, kernel_params, path): + """Outputs the static file based on the version of Windows.""" + actual_path = self.get_resource_path(kernel_params, path) + return FilesystemReader(FilePath(actual_path)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/boot_images.py maas-1.7.6+bzr3376/src/provisioningserver/boot_images.py --- maas-1.5.4+bzr2294/src/provisioningserver/boot_images.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/boot_images.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,70 +0,0 @@ -# Copyright 2012-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Dealing with boot images.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - 'report_to_server', - ] - -import json -from logging import getLogger - -from apiclient.maas_client import ( - MAASClient, - MAASDispatcher, - MAASOAuth, - ) -from provisioningserver.auth import get_recorded_api_credentials -from provisioningserver.boot import tftppath -from provisioningserver.cluster_config import ( - get_cluster_uuid, - get_maas_url, - ) -from provisioningserver.config import BootConfig - - -logger = getLogger(__name__) - - -def get_cached_knowledge(): - """Return cached items required to report to the server. - - :return: Tuple of cached items: (maas_url, api_credentials). Either may - be None if the information has not been received from the server yet. 
- """ - maas_url = get_maas_url() - if maas_url is None: - logger.debug("Not reporting boot images: don't have API URL yet.") - api_credentials = get_recorded_api_credentials() - if api_credentials is None: - logger.debug("Not reporting boot images: don't have API key yet.") - return maas_url, api_credentials - - -def submit(maas_url, api_credentials, images): - """Submit images to server.""" - path = 'api/1.0/nodegroups/%s/boot-images/' % get_cluster_uuid() - MAASClient(MAASOAuth(*api_credentials), MAASDispatcher(), maas_url).post( - path=path, op='report_boot_images', images=json.dumps(images)) - - -def report_to_server(): - """For master worker only: report available netboot images.""" - maas_url, api_credentials = get_cached_knowledge() - if not all([maas_url, api_credentials]): - return - - images = tftppath.list_boot_images( - BootConfig.load_from_cache()['boot']['storage'] + '/current/') - - submit(maas_url, api_credentials, images) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/cache.py maas-1.7.6+bzr3376/src/provisioningserver/cache.py --- maas-1.5.4+bzr2294/src/provisioningserver/cache.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/cache.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,59 +0,0 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""API credentials for node-group workers.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - 'cache', - 'initialize', - ] - - -from multiprocessing import Manager - - -class Cache(object): - """A process-safe dict-like cache.""" - - def __init__(self, cache_backend): - self.cache_backend = cache_backend - - def set(self, key, value): - self.cache_backend[key] = value - - def get(self, key): - return self.cache_backend.get(key, None) - - def clear(self): - self.cache_backend.clear() - - -_manager = None - -cache = None - -initialized = False - - -def initialize(): - """Initialize cache of shared data between processes. - - This needs to be done exactly once, by the parent process, before it - start forking off workers. - """ - global _manager - global cache - global initialized - if not initialized: - _manager = Manager() - cache = Cache(_manager.dict()) - initialized = True diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/concurrency.py maas-1.7.6+bzr3376/src/provisioningserver/concurrency.py --- maas-1.5.4+bzr2294/src/provisioningserver/concurrency.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/concurrency.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,32 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Configuration relating to concurrency in the cluster controller. + +This module is intended as a place to define concurrency policies for code +running in the cluster controller. Typically this will take the form of a +Twisted concurrency primative, like `DeferredLock` or `DeferredSemaphore`. 
+ +""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "boot_images", + "dhcp", +] + +from twisted.internet.defer import DeferredLock + +# Limit boot image imports to one at a time. +boot_images = DeferredLock() + +# Limit DHCP changes to one at a time. +dhcp = DeferredLock() diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/config.py maas-1.7.6+bzr3376/src/provisioningserver/config.py --- maas-1.5.4+bzr2294/src/provisioningserver/config.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/config.py 2015-07-10 01:27:14.000000000 +0000 @@ -4,10 +4,9 @@ """MAAS Provisioning Configuration. Configuration for most elements of a Cluster Controller can be obtained -through this module's `Config` and `BootConfig` classes. At the time of -writing the exceptions are the Celery worker's configuration, as well as the -`CLUSTER_UUID` and `MAAS_URL` environment variables -(see `provisioningserver.cluster_config`). +through this module's `Config` validator class. At the time of writing the +exceptions are the `CLUSTER_UUID` and `MAAS_URL` environment variables (see +`provisioningserver.cluster_config`). It's pretty simple. Typical usage is:: @@ -53,7 +52,8 @@ __metaclass__ = type __all__ = [ - "BootConfig", + "BOOT_RESOURCES_STORAGE", + "BootSources", "Config", "ConfigBase", "ConfigMeta", @@ -72,18 +72,27 @@ ) from formencode.declarative import DeclarativeMeta from formencode.validators import ( - Bool, Int, RequireIfPresent, Set, String, ) -from provisioningserver.utils import atomic_write +from provisioningserver.utils.fs import atomic_write import yaml +# Path to the directory on the cluster controller where boot resources are +# stored. This used to be configurable in bootresources.yaml, and may become +# configurable again in the future. 
+BOOT_RESOURCES_STORAGE = '/var/lib/maas/boot-resources/' + class ConfigOops(Schema): - """Configuration validator for OOPS options.""" + """Configuration validator for OOPS options. + + Deprecated: MAAS no longer records OOPS reports. This remains here to + avoid validation failures when using old versions of the cluster's + configuration file. + """ if_key_missing = None @@ -96,7 +105,12 @@ class ConfigBroker(Schema): - """Configuration validator for message broker options.""" + """Configuration validator for message broker options. + + Deprecated: MAAS no longer uses a message broker. This remains here to + avoid validation failures when using old versions of the cluster's + configuration file. + """ if_key_missing = None @@ -121,19 +135,20 @@ root = String(if_missing="/var/lib/maas/tftp") # TFTP root directory, managed by the Simplestreams-based import script. - # Equates to $storage/current. The import script maintains "current" as a - # symlink pointing to the most recent images. + # The import script maintains "current" as a symlink pointing to the most + # recent images. + # XXX jtv 2014-05-22: Redundant with BOOT_RESOURCES_STORAGE. resource_root = String( - if_missing="/var/lib/maas/boot-resources/current/") + if_missing=os.path.join(BOOT_RESOURCES_STORAGE, 'current/')) port = Int(min=1, max=65535, if_missing=69) generator = String(if_missing=b"http://localhost/MAAS/api/1.0/pxeconfig/") class ConfigLegacyEphemeral(Schema): - """Legacy `eephemeral` section in `pserv.yaml` prior to MAAS 1.5. + """Legacy `ephemeral` section in `pserv.yaml` prior to MAAS 1.5. - This has been replaced with boot-source selection in `bootresources.yaml`. + This has been superseded by boot sources. It is still accepted in `pserv.yaml`, but not used. """ if_key_missing = None @@ -144,8 +159,8 @@ class ConfigLegacyBoot(Schema): """Legacy `boot` section in `pserv.yaml` prior to MAAS 1.5. - The new version of this config section lives in `bootresources.yaml`. 
It - is still accepted in `pserv.yaml`, but not used. + This has been superseded by boot sources. + It is still accepted in `pserv.yaml`, but not used. """ if_key_missing = None architectures = Set(if_missing=None) @@ -158,44 +173,31 @@ if_key_missing = None -class ConfigBootSourceSelection(Schema): - """Configuration validator for boot source election onfiguration.""" +class BootSourceSelection(Schema): + """Configuration validator for boot source selection configuration.""" if_key_missing = None + os = String(if_missing="*") release = String(if_missing="*") arches = Set(if_missing=["*"]) subarches = Set(if_missing=['*']) labels = Set(if_missing=['*']) -class ConfigBootSource(Schema): +class BootSource(Schema): """Configuration validator for boot source configuration.""" if_key_missing = None - path = String( + url = String( if_missing="http://maas.ubuntu.com/images/ephemeral-v2/releases/") keyring = String( if_missing="/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg") + keyring_data = String(if_missing=None) selections = ForEach( - ConfigBootSourceSelection, - if_missing=[ConfigBootSourceSelection.to_python({})]) - - -class ConfigBoot(Schema): - """Configuration validator for boot configuration.""" - - if_key_missing = None - - storage = String(if_missing="/var/lib/maas/boot-resources/") - sources = ForEach( - ConfigBootSource, if_missing=[ConfigBootSource.to_python({})]) - - # Marker in the bootresources.yaml file: if True, the file has not been - # edited yet and needs to be either configured with initial choices, or - # rewritten based on previously downloaded boot images. 
- configure_me = Bool(if_missing=False) + BootSourceSelection, + if_missing=[BootSourceSelection.to_python({})]) class ConfigBase(Schema): @@ -334,13 +336,24 @@ boot = ConfigLegacyBoot -class BootConfig(ConfigBase): - """Configuration for boot resources.""" +class BootSources: + """Validator for a list of boot-source entries.""" - class __metaclass__(ConfigMeta): - envvar = "MAAS_BOOT_RESOURCES_SETTINGS" - default = "bootresources.yaml" + # Validator for a list of BootSource definitions. We can't make our own + # class for this. ForEach (which is how you construct a validator for a + # list of items) and our own ConfigBase each have their own metaclass, + # ruling out a combined inheritance pattern. So instead we duplicate + # small bits of ConfigBase code here, and make __getitem__ forward to the + # list validator. + sources = ForEach(BootSource()) - if_key_missing = None + @classmethod + def parse(cls, stream): + """Load sources spec from `stream`, as YAML, and validate.""" + return cls.sources.to_python(yaml.safe_load(stream)) - boot = ConfigBoot + @classmethod + def load(cls, filename): + """Load sources spec from `filename`, as YAML, and validate.""" + with open(filename, "rb") as stream: + return cls.parse(stream) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/configure_maas_url.py maas-1.7.6+bzr3376/src/provisioningserver/configure_maas_url.py --- maas-1.5.4+bzr2294/src/provisioningserver/configure_maas_url.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/configure_maas_url.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,123 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Management command: update `MAAS_URL`. 
+ +The MAAS cluster controller packaging calls this in order to set a new +"MAAS URL" (the URL where nodes and cluster controllers can reach the +region controller) in the cluster controller's configuration files. +""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'add_arguments', + 'run', + ] + +from functools import partial +import re +from urlparse import urlparse + +from provisioningserver.utils.fs import ( + atomic_write, + read_text_file, + ) +from provisioningserver.utils.url import compose_URL + + +MAAS_CLUSTER_CONF = '/etc/maas/maas_cluster.conf' + +PSERV_YAML = '/etc/maas/pserv.yaml' + + +def rewrite_config_file(path, line_filter, mode=0600): + """Rewrite config file at `path` on a line-by-line basis. + + Reads the file at `path`, runs its lines through `line_filter`, and + writes the result back to `path`. + + Newlines may not be exactly as they were. A trailing newline is ensured. + + :param path: Path to the config file to be rewritten. + :param line_filter: A callable which accepts a line of input text (without + trailing newline), and returns the corresponding line of output text + (also without trailing newline). + :param mode: File access permissions for the newly written file. + """ + input_lines = read_text_file(path).splitlines() + output_lines = [line_filter(line) for line in input_lines] + result = '%s\n' % '\n'.join(output_lines) + atomic_write(result, path, mode=mode) + + +def update_maas_cluster_conf(url): + """Update `MAAS_URL` in `/etc/maas/maas_cluster.conf`. + + This file contains a shell-style assignment of the `MAAS_URL` + variable. Its assigned value will be changed to `url`. 
+ """ + substitute_line = lambda line: ( + 'MAAS_URL="%s"' % url + if re.match('\s*MAAS_URL=', line) + else line) + rewrite_config_file(MAAS_CLUSTER_CONF, substitute_line, mode=0640) + + +def extract_host(url): + """Return just the host part of `url`.""" + return urlparse(url).hostname + + +def substitute_pserv_yaml_line(new_host, line): + match = re.match('(\s*generator:)\s+(\S*)(.*)$', line) + if match is None: + # Not the generator line. Keep as-is. + return line + [head, input_url, tail] = match.groups() + return "%s %s%s" % (head, compose_URL(input_url, new_host), tail) + + +def update_pserv_yaml(host): + """Update `generator` in `/etc/maas/pserv.yaml`. + + This file contains a YAML line defining a `generator` URL. The line must + look something like:: + + generator: http://10.9.8.7/MAAS/api/1.0/pxeconfig/ + + The host part of the URL (in this example, `10.9.8.7`) will be replaced + with the new `host`. If `host` is an IPv6 address, this function will + ensure that it is surrounded by square brackets. + """ + substitute_line = partial(substitute_pserv_yaml_line, host) + rewrite_config_file(PSERV_YAML, substitute_line, mode=0644) + + +def add_arguments(parser): + """Add this command's options to the `ArgumentParser`. + + Specified by the `ActionScript` interface. + """ + parser.add_argument( + 'maas_url', metavar='URL', + help=( + "URL where nodes and cluster controllers can reach the MAAS " + "region controller.")) + + +def run(args): + """Update MAAS_URL setting in configuration files. + + For use by the MAAS packaging scripts. Updates configuration files + to reflect a new MAAS_URL setting. 
+ """ + update_maas_cluster_conf(args.maas_url) + update_pserv_yaml(extract_host(args.maas_url)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/custom_hardware/seamicro.py maas-1.7.6+bzr3376/src/provisioningserver/custom_hardware/seamicro.py --- maas-1.5.4+bzr2294/src/provisioningserver/custom_hardware/seamicro.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/custom_hardware/seamicro.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,326 +0,0 @@ -# Copyright 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - 'power_control_seamicro15k_v09', - 'power_control_seamicro15k_v2', - 'probe_seamicro15k_and_enlist', - ] - -import httplib -import json -import logging -import time -import urllib2 -import urlparse - -import provisioningserver.custom_hardware.utils as utils -from seamicroclient.v2 import ( - client as seamicro_client, - ) -from seamicroclient import ( - exceptions as seamicro_exceptions, - ) - - -logger = logging.getLogger(__name__) - - -class POWER_STATUS: - ON = 'Power-On' - OFF = 'Power-Off' - RESET = 'Reset' - - -class SeaMicroError(Exception): - """Failure talking to a SeaMicro chassis controller. """ - pass - - -class SeaMicroAPIV09Error(SeaMicroError): - """Failure talking to a SeaMicro API v0.9. 
""" - - def __init__(self, msg, response_code=None): - super(SeaMicroAPIV09Error, self).__init__(msg) - self.response_code = response_code - - -class SeaMicroAPIV09(object): - allowed_codes = [httplib.OK, httplib.ACCEPTED, httplib.NOT_MODIFIED] - - def __init__(self, url): - """ - :param url: The URL of the seamicro chassis, e.g.: http://seamciro/v0.9 - :type url: string - """ - self.url = url - self.token = None - - def build_url(self, location, params=None): - """Builds an order-dependent url, as the SeaMicro chassis - requires order-dependent parameters. - """ - if params is None: - params = [] - params = filter(None, params) - return urlparse.urljoin(self.url, location) + '?' + '&'.join(params) - - def parse_response(self, url, response): - """Parses the HTTP response, checking for errors - from the SeaMicro chassis. - """ - if response.getcode() not in self.allowed_codes: - raise SeaMicroAPIV09Error( - "got response code %s" % response.getcode(), - response_code=response.getcode()) - text = response.read() - - # Decode the response, it should be json. If not - # handle that case and set json_data to None, so - # a SeaMicroAPIV09Error can be raised. - try: - json_data = json.loads(text) - except ValueError: - json_data = None - - if not json_data: - raise SeaMicroAPIV09Error( - 'No JSON data found from %s: got %s' % (url, text)) - json_rpc_code = int(json_data['error']['code']) - if json_rpc_code not in self.allowed_codes: - raise SeaMicroAPIV09Error( - 'Got JSON RPC error code %d: %s for %s' % ( - json_rpc_code, - httplib.responses.get(json_rpc_code, 'Unknown!'), - url), - response_code=json_rpc_code) - return json_data - - def get(self, location, params=None): - """Dispatch a GET request to a SeaMicro chassis. - - The seamicro box has order-dependent HTTP parameters, so we build - our own get URL, and use a list vs. a dict for data, as the order is - implicit. 
- """ - url = self.build_url(location, params) - response = urllib2.urlopen(url) - json_data = self.parse_response(url, response) - - return json_data['result'] - - def put(self, location, params=None): - """Dispatch a PUT request to a SeaMicro chassis. - - The seamicro box has order-dependent HTTP parameters, so we build - our own get URL, and use a list vs. a dict for data, as the order is - implicit. - """ - opener = urllib2.build_opener(urllib2.HTTPHandler) - url = self.build_url(location, params) - request = urllib2.Request(url) - request.get_method = lambda: 'PUT' - request.add_header('content-type', 'text/json') - response = opener.open(request) - json_data = self.parse_response(url, response) - - return json_data['result'] - - def is_logged_in(self): - return self.token is not None - - def login(self, username, password): - if not self.is_logged_in(): - self.token = self.get("login", [username, password]) - - def logout(self): - if self.is_logged_in(): - self.get("logout") - self.token = None - - def servers_all(self): - return self.get("servers/all", [self.token]) - - def servers(self): - return self.get("servers", [self.token]) - - def server_index(self, server_id): - """API v0.9 uses arbitrary indexing, this function converts a server - id to an index that can be used for detailed outputs & commands. 
- """ - servers = self.servers()['serverId'] - for idx, name in servers.items(): - if name == server_id: - return idx - return None - - def power_server(self, server_id, new_status, do_pxe=False, force=False): - idx = self.server_index(server_id) - if idx is None: - raise SeaMicroAPIV09Error( - 'Failed to retrieve server index, ' - 'invalid server_id: %s' % server_id) - - location = 'servers/%s' % idx - params = ['action=%s' % new_status] - if new_status in [POWER_STATUS.ON, POWER_STATUS.RESET]: - if do_pxe: - params.append("using-pxe=true") - else: - params.append("using-pxe=false") - elif new_status in [POWER_STATUS.OFF]: - if force: - params.append("force=true") - else: - params.append("force=false") - else: - raise SeaMicroAPIV09Error('Invalid power action: %s' % new_status) - - params.append(self.token) - self.put(location, params=params) - return True - - def power_on(self, server_id, do_pxe=False): - return self.power_server(server_id, POWER_STATUS.ON, do_pxe=do_pxe) - - def power_off(self, server_id, force=False): - return self.power_server(server_id, POWER_STATUS.OFF, force=force) - - def reset(self, server_id, do_pxe=False): - return self.power_server(server_id, POWER_STATUS.RESET, do_pxe=do_pxe) - - -def get_seamicro15k_api(version, ip, username, password): - """Gets the api client depending on the version. - Supports v0.9 and v2.0. 
- - :returns: api for version, None if version not supported - """ - if version == 'v0.9': - api = SeaMicroAPIV09('http://%s/v0.9/' % ip) - try: - api.login(username, password) - except urllib2.URLError: - # Cannot reach using v0.9, might not be supported - return None - return api - elif version == 'v2.0': - url = 'http://%s/v2.0' % ip - try: - api = seamicro_client.Client( - auth_url=url, username=username, password=password) - except seamicro_exceptions.ConnectionRefused: - # Cannot reach using v2.0, might no be supported - return None - return api - - -def get_seamicro15k_servers(version, ip, username, password): - """Gets a list of tuples containing (server_id, mac_address) from the - sm15k api version. Supports v0.9 and v2.0. - - :returns: list of (server_id, mac_address), None if version not supported - """ - api = get_seamicro15k_api(version, ip, username, password) - if api: - if version == 'v0.9': - return ( - (server['serverId'].split('/')[0], server['serverMacAddr']) - for server in - api.servers_all().values() - # There are 8 network cards attached to these boxes, we only - # use NIC 0 for PXE booting. - if server['serverNIC'] == '0' - ) - elif version == 'v2.0': - servers = [] - for server in api.servers.list(): - id = server.id.split('/')[0] - macs = [nic['macAddr'] for nic in server.nic.values()] - servers.append((id, macs)) - return servers - return None - - -def select_seamicro15k_api_version(power_control): - """Returns the lastest api version to use.""" - if power_control == 'ipmi': - return ['v2.0', 'v0.9'] - if power_control == 'restapi': - return ['v0.9'] - if power_control == 'restapi2': - return ['v2.0'] - raise SeaMicroError( - 'Unsupported power control method: %s.' 
% power_control) - - -def find_seamicro15k_servers(ip, username, password, power_control): - """Returns the list of servers, using the latest supported api version.""" - api_versions = select_seamicro15k_api_version(power_control) - for version in api_versions: - servers = get_seamicro15k_servers(version, ip, username, password) - if servers is not None: - return servers - raise SeaMicroError('Failure to retrieve servers.') - - -def probe_seamicro15k_and_enlist(ip, username, password, power_control=None): - power_control = power_control or 'ipmi' - - servers = find_seamicro15k_servers(ip, username, password, power_control) - for system_id, mac in servers: - params = { - 'power_address': ip, - 'power_user': username, - 'power_pass': password, - 'power_control': power_control, - 'system_id': system_id - } - - utils.create_node(mac, 'amd64', 'sm15k', params) - - -def power_control_seamicro15k_v09(ip, username, password, server_id, - power_change, retry_count=5, retry_wait=1): - server_id = '%s/0' % server_id - api = SeaMicroAPIV09('http://%s/v0.9/' % ip) - - while retry_count > 0: - api.login(username, password) - try: - if power_change == "on": - api.power_on(server_id, do_pxe=True) - elif power_change == "off": - api.power_off(server_id, force=True) - except SeaMicroAPIV09Error as e: - # Chance that multiple login's are at once, the api - # only supports one at a time. So lets try again after - # a second, up to max retry count. 
- if e.response_code == 401: - retry_count -= 1 - time.sleep(retry_wait) - continue - else: - raise - break - - -def power_control_seamicro15k_v2(ip, username, password, server_id, - power_change): - server_id = '%s/0' % server_id - api = get_seamicro15k_api('v2.0', ip, username, password) - if api: - server = api.servers.get(server_id) - if power_change == "on": - server.power_on(using_pxe=True) - elif power_change == "off": - server.power_off(force=True) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/custom_hardware/tests/test_seamicro.py maas-1.7.6+bzr3376/src/provisioningserver/custom_hardware/tests/test_seamicro.py --- maas-1.5.4+bzr2294/src/provisioningserver/custom_hardware/tests/test_seamicro.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/custom_hardware/tests/test_seamicro.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,467 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for `provisioningserver.custom_hardware.seamicro`. 
-""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import json -import urlparse - -from maastesting.factory import factory -from maastesting.matchers import ( - MockCalledOnceWith, - MockCalledWith, - MockCallsMatch, - ) -from maastesting.testcase import MAASTestCase -from mock import ( - call, - Mock, - ) -from provisioningserver.custom_hardware.seamicro import ( - find_seamicro15k_servers, - power_control_seamicro15k_v09, - power_control_seamicro15k_v2, - POWER_STATUS, - probe_seamicro15k_and_enlist, - SeaMicroAPIV09, - SeaMicroAPIV09Error, - SeaMicroError, - select_seamicro15k_api_version, - ) -import provisioningserver.custom_hardware.utils - - -class FakeResponse(object): - - def __init__(self, response_code, response, is_json=False): - self.response_code = response_code - self.response = response - if is_json: - self.response = json.dumps(response) - - def getcode(self): - return self.response_code - - def read(self): - return self.response - - -class FakeServer(object): - - def __init__(self, id): - self.id = id - self.nic = {} - - def add_fake_nic(self, id): - self.nic[id] = {'macAddr': factory.getRandomMACAddress()} - - def get_fake_macs(self): - return [nic['macAddr'] for nic in self.nic.values()] - - -class FakeSeaMicroServerManager(object): - - def __init__(self): - self.servers = [] - - def get(self, server_id): - for server in self.servers: - if server_id == server.id: - return server - return None - - def list(self): - return self.servers - - -class FakeSeaMicroClient(object): - pass - - -class TestSeaMicroAPIV09(MAASTestCase): - """Tests for SeaMicroAPIV09.""" - - def test_build_url(self): - url = factory.getRandomString() - api = SeaMicroAPIV09('http://%s/' % url) - location = factory.getRandomString() - params = [factory.getRandomString() for _ in range(3)] - output = api.build_url(location, params) - parsed = urlparse.urlparse(output) - 
self.assertEqual(url, parsed.netloc) - self.assertEqual(location, parsed.path.split('/')[1]) - self.assertEqual(params, parsed.query.split('&')) - - def test_invalid_reponse_code(self): - url = 'http://%s/' % factory.getRandomString() - api = SeaMicroAPIV09(url) - response = FakeResponse(401, 'Unauthorized') - self.assertRaises( - SeaMicroAPIV09Error, api.parse_response, - url, response) - - def test_invalid_json_response(self): - url = 'http://%s/' % factory.getRandomString() - api = SeaMicroAPIV09(url) - response = FakeResponse(200, factory.getRandomString()) - self.assertRaises( - SeaMicroAPIV09Error, api.parse_response, - url, response) - - def test_json_error_response(self): - url = 'http://%s/' % factory.getRandomString() - api = SeaMicroAPIV09(url) - data = { - 'error': { - 'code': 401 - } - } - response = FakeResponse(200, data, is_json=True) - self.assertRaises( - SeaMicroAPIV09Error, api.parse_response, - url, response) - - def test_json_valid_response(self): - url = 'http://%s/' % factory.getRandomString() - api = SeaMicroAPIV09(url) - output = factory.getRandomString() - data = { - 'error': { - 'code': 200 - }, - 'result': { - 'data': output - }, - } - response = FakeResponse(200, data, is_json=True) - result = api.parse_response(url, response) - self.assertEqual(output, result['result']['data']) - - def configure_get_result(self, result=None): - self.patch( - provisioningserver.custom_hardware.seamicro.SeaMicroAPIV09, 'get', - Mock(return_value=result)) - - def test_login_and_logout(self): - token = factory.getRandomString() - self.configure_get_result(token) - url = 'http://%s/' % factory.getRandomString() - api = SeaMicroAPIV09(url) - api.login('username', 'password') - self.assertEqual(token, api.token) - api.logout() - self.assertIsNone(api.token) - - def test_get_server_index(self): - result = { - 'serverId': { - 0: '0/0', - 1: '1/0', - 2: '2/0', - } - } - self.configure_get_result(result) - url = 'http://%s/' % factory.getRandomString() - api = 
SeaMicroAPIV09(url) - self.assertEqual(0, api.server_index('0/0')) - self.assertEqual(1, api.server_index('1/0')) - self.assertEqual(2, api.server_index('2/0')) - self.assertIsNone(api.server_index('3/0')) - - def configure_put_server_power(self, token=None): - result = { - 'serverId': { - 0: '0/0', - } - } - self.configure_get_result(result) - mock = self.patch( - provisioningserver.custom_hardware.seamicro.SeaMicroAPIV09, - 'put') - url = 'http://%s/' % factory.getRandomString() - api = SeaMicroAPIV09(url) - api.token = token - return mock, api - - def assert_put_power_called(self, mock, idx, new_status, *params): - location = 'servers/%d' % idx - params = ['action=%s' % new_status] + list(params) - self.assertThat(mock, MockCalledOnceWith(location, params=params)) - - def test_put_server_power_on_using_pxe(self): - token = factory.getRandomString() - mock, api = self.configure_put_server_power(token) - api.power_on('0/0', do_pxe=True) - self.assert_put_power_called( - mock, 0, POWER_STATUS.ON, 'using-pxe=true', token) - - def test_put_server_power_on_not_using_pxe(self): - token = factory.getRandomString() - mock, api = self.configure_put_server_power(token) - api.power_on('0/0', do_pxe=False) - self.assert_put_power_called( - mock, 0, POWER_STATUS.ON, 'using-pxe=false', token) - - def test_put_server_power_reset_using_pxe(self): - token = factory.getRandomString() - mock, api = self.configure_put_server_power(token) - api.reset('0/0', do_pxe=True) - self.assert_put_power_called( - mock, 0, POWER_STATUS.RESET, 'using-pxe=true', token) - - def test_put_server_power_reset_not_using_pxe(self): - token = factory.getRandomString() - mock, api = self.configure_put_server_power(token) - api.reset('0/0', do_pxe=False) - self.assert_put_power_called( - mock, 0, POWER_STATUS.RESET, 'using-pxe=false', token) - - def test_put_server_power_off(self): - token = factory.getRandomString() - mock, api = self.configure_put_server_power(token) - api.power_off('0/0', force=False) - 
self.assert_put_power_called( - mock, 0, POWER_STATUS.OFF, 'force=false', token) - - def test_put_server_power_off_force(self): - token = factory.getRandomString() - mock, api = self.configure_put_server_power(token) - api.power_off('0/0', force=True) - self.assert_put_power_called( - mock, 0, POWER_STATUS.OFF, 'force=true', token) - - -class TestSeaMicro(MAASTestCase): - """Tests for SeaMicro custom hardware.""" - - def test_select_seamicro15k_api_version_ipmi(self): - versions = select_seamicro15k_api_version('ipmi') - self.assertEqual(['v2.0', 'v0.9'], versions) - - def test_select_seamicro15k_api_version_restapi(self): - versions = select_seamicro15k_api_version('restapi') - self.assertEqual(['v0.9'], versions) - - def test_select_seamicro15k_api_version_restapi2(self): - versions = select_seamicro15k_api_version('restapi2') - self.assertEqual(['v2.0'], versions) - - def configure_get_seamicro15k_api(self, return_value=None): - ip = factory.getRandomIPAddress() - username = factory.getRandomString() - password = factory.getRandomString() - mock = self.patch( - provisioningserver.custom_hardware.seamicro, - 'get_seamicro15k_api') - mock.return_value = return_value - return mock, ip, username, password - - def test_find_seamicro15k_servers_impi(self): - mock, ip, username, password = self.configure_get_seamicro15k_api() - self.assertRaises( - SeaMicroError, find_seamicro15k_servers, ip, username, - password, 'ipmi') - self.assertThat( - mock, - MockCallsMatch( - call('v2.0', ip, username, password), - call('v0.9', ip, username, password))) - - def test_find_seamicro15k_servers_restapi(self): - mock, ip, username, password = self.configure_get_seamicro15k_api() - self.assertRaises( - SeaMicroError, find_seamicro15k_servers, ip, username, - password, 'restapi') - self.assertThat( - mock, MockCalledOnceWith('v0.9', ip, username, password)) - - def test_find_seamicro15k_servers_restapi2(self): - mock, ip, username, password = self.configure_get_seamicro15k_api() - 
self.assertRaises( - SeaMicroError, find_seamicro15k_servers, ip, username, - password, 'restapi2') - self.assertThat( - mock, MockCalledOnceWith('v2.0', ip, username, password)) - - def configure_api_v09_login(self, token=None): - token = token or factory.getRandomString() - mock = self.patch( - provisioningserver.custom_hardware.seamicro.SeaMicroAPIV09, - 'login') - mock.return_value = token - return mock - - def test_probe_seamicro15k_and_enlist_v09(self): - self.configure_api_v09_login() - ip = factory.getRandomIPAddress() - username = factory.getRandomString() - password = factory.getRandomString() - result = { - 0: { - 'serverId': '0/0', - 'serverNIC': '0', - 'serverMacAddr': factory.getRandomMACAddress(), - }, - 1: { - 'serverId': '1/0', - 'serverNIC': '0', - 'serverMacAddr': factory.getRandomMACAddress(), - }, - 2: { - 'serverId': '2/0', - 'serverNIC': '0', - 'serverMacAddr': factory.getRandomMACAddress(), - }, - 3: { - 'serverId': '3/1', - 'serverNIC': '1', - 'serverMacAddr': factory.getRandomMACAddress(), - }, - } - self.patch( - provisioningserver.custom_hardware.seamicro.SeaMicroAPIV09, 'get', - Mock(return_value=result)) - mock_create_node = self.patch( - provisioningserver.custom_hardware.utils, - 'create_node') - - probe_seamicro15k_and_enlist( - ip, username, password, power_control='restapi') - self.assertEqual(3, mock_create_node.call_count) - - last = result[2] - power_params = { - 'power_control': 'restapi', - 'system_id': last['serverId'].split('/')[0], - 'power_address': ip, - 'power_pass': password, - 'power_user': username - } - self.assertThat( - mock_create_node, - MockCalledWith( - last['serverMacAddr'], 'amd64', - 'sm15k', power_params)) - - def test_power_control_seamicro15k_v09(self): - self.configure_api_v09_login() - ip = factory.getRandomIPAddress() - username = factory.getRandomString() - password = factory.getRandomString() - mock = self.patch( - provisioningserver.custom_hardware.seamicro.SeaMicroAPIV09, - 'power_server') - - 
power_control_seamicro15k_v09(ip, username, password, '25', 'on') - self.assertThat( - mock, - MockCalledOnceWith('25/0', POWER_STATUS.ON, do_pxe=True)) - - def test_power_control_seamicro15k_v09_retry_failure(self): - self.configure_api_v09_login() - ip = factory.getRandomIPAddress() - username = factory.getRandomString() - password = factory.getRandomString() - mock = self.patch( - provisioningserver.custom_hardware.seamicro.SeaMicroAPIV09, - 'power_server') - mock.side_effect = SeaMicroAPIV09Error("mock error", response_code=401) - - power_control_seamicro15k_v09( - ip, username, password, '25', 'on', - retry_count=5, retry_wait=0) - self.assertEqual(5, mock.call_count) - - def test_power_control_seamicro15k_v09_exception_failure(self): - self.configure_api_v09_login() - ip = factory.getRandomIPAddress() - username = factory.getRandomString() - password = factory.getRandomString() - mock = self.patch( - provisioningserver.custom_hardware.seamicro.SeaMicroAPIV09, - 'power_server') - mock.side_effect = SeaMicroAPIV09Error("mock error") - - self.assertRaises( - SeaMicroAPIV09Error, power_control_seamicro15k_v09, - ip, username, password, '25', 'on') - - def test_probe_seamicro15k_and_enlist_v2(self): - ip = factory.getRandomIPAddress() - username = factory.getRandomString() - password = factory.getRandomString() - - fake_server_0 = FakeServer('0/0') - fake_server_0.add_fake_nic('0') - fake_server_0.add_fake_nic('1') - fake_server_1 = FakeServer('1/0') - fake_server_1.add_fake_nic('0') - fake_server_1.add_fake_nic('1') - fake_client = FakeSeaMicroClient() - fake_client.servers = FakeSeaMicroServerManager() - fake_client.servers.servers.append(fake_server_0) - fake_client.servers.servers.append(fake_server_1) - mock_get_api = self.patch( - provisioningserver.custom_hardware.seamicro, - 'get_seamicro15k_api') - mock_get_api.return_value = fake_client - mock_create_node = self.patch( - provisioningserver.custom_hardware.utils, - 'create_node') - - 
probe_seamicro15k_and_enlist( - ip, username, password, power_control='restapi2') - self.assertEqual(2, mock_create_node.call_count) - - self.assertThat( - mock_create_node, - MockCallsMatch( - call( - fake_server_0.get_fake_macs(), 'amd64', 'sm15k', - { - 'power_control': 'restapi2', - 'system_id': '0', - 'power_address': ip, - 'power_pass': password, - 'power_user': username - }), - call( - fake_server_1.get_fake_macs(), 'amd64', 'sm15k', - { - 'power_control': 'restapi2', - 'system_id': '1', - 'power_address': ip, - 'power_pass': password, - 'power_user': username - }))) - - def test_power_control_seamicro15k_v2(self): - ip = factory.getRandomIPAddress() - username = factory.getRandomString() - password = factory.getRandomString() - - fake_server = FakeServer('0/0') - fake_client = FakeSeaMicroClient() - fake_client.servers = FakeSeaMicroServerManager() - fake_client.servers.servers.append(fake_server) - mock_power_on = self.patch(fake_server, 'power_on') - - mock_get_api = self.patch( - provisioningserver.custom_hardware.seamicro, - 'get_seamicro15k_api') - mock_get_api.return_value = fake_client - - power_control_seamicro15k_v2(ip, username, password, '0', 'on') - mock_power_on.assert_called() diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/custom_hardware/tests/test_ucsm.py maas-1.7.6+bzr3376/src/provisioningserver/custom_hardware/tests/test_ucsm.py --- maas-1.5.4+bzr2294/src/provisioningserver/custom_hardware/tests/test_ucsm.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/custom_hardware/tests/test_ucsm.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,638 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for ``provisioningserver.custom_hardware.ucsm``.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from itertools import permutations -import random -from StringIO import StringIO -import urllib2 - -from lxml.etree import ( - Element, - SubElement, - XML, - ) -from maastesting.factory import factory -from maastesting.matchers import ( - MockCalledOnceWith, - MockCallsMatch, - MockNotCalled, - ) -from maastesting.testcase import MAASTestCase -from mock import ( - ANY, - call, - Mock, - ) -from provisioningserver.custom_hardware import ( - ucsm, - utils, - ) -from provisioningserver.custom_hardware.ucsm import ( - get_children, - get_first_booter, - get_macs, - get_power_command, - get_server_power_control, - get_servers, - get_service_profile, - logged_in, - make_policy_change, - make_request_data, - parse_response, - power_control_ucsm, - probe_and_enlist_ucsm, - probe_servers, - RO_KEYS, - set_lan_boot_default, - set_server_power_control, - strip_ro_keys, - UCSM_XML_API, - UCSM_XML_API_Error, - ) - - -def make_api(url='http://url', user='u', password='p', - cookie='foo', mock_call=True): - api = UCSM_XML_API(url, user, password) - api.cookie = cookie - return api - - -def make_api_patch_call(testcase, *args, **kwargs): - api = make_api(*args, **kwargs) - mock = testcase.patch(api, '_call') - return api, mock - - -def make_fake_result(root_class, child_tag, container='outConfigs'): - fake_result = Element(root_class) - outConfigs = SubElement(fake_result, container) - outConfigs.append(Element(child_tag)) - return outConfigs - - -def make_class(): - return factory.make_name('class') - - -def make_dn(): - return factory.make_name('dn') - - -def make_server(): - return factory.make_name('server') - - -class TestUCSMXMLAPIError(MAASTestCase): - """Tests for ``UCSM_XML_API_Error``.""" - - def test_includes_code_and_msg(self): - def raise_error(): - raise 
UCSM_XML_API_Error('bad', 4224) - - error = self.assertRaises(UCSM_XML_API_Error, raise_error) - - self.assertEqual('bad', error.args[0]) - self.assertEqual(4224, error.code) - - -class TestMakeRequestData(MAASTestCase): - """Tests for ``make_request_data``.""" - - def test_no_children(self): - fields = {'hello': 'there'} - request_data = make_request_data('foo', fields) - root = XML(request_data) - self.assertEqual('foo', root.tag) - self.assertEqual('there', root.get('hello')) - - def test_with_children(self): - fields = {'hello': 'there'} - children_tags = ['bar', 'baz'] - children = [Element(child_tag) for child_tag in children_tags] - request_data = make_request_data('foo', fields, children) - root = XML(request_data) - self.assertEqual('foo', root.tag) - self.assertItemsEqual(children_tags, (e.tag for e in root)) - - def test_no_fields(self): - request_data = make_request_data('foo') - root = XML(request_data) - self.assertEqual('foo', root.tag) - - -class TestParseResonse(MAASTestCase): - """Tests for ``parse_response``.""" - - def test_no_error(self): - xml = '' - response = parse_response(xml) - self.assertEqual('foo', response.tag) - - def test_error(self): - xml = '' - self.assertRaises(UCSM_XML_API_Error, parse_response, xml) - - -class TestLogin(MAASTestCase): - """"Tests for ``UCSM_XML_API.login``.""" - - def test_login_assigns_cookie(self): - cookie = 'chocolate chip' - api, mock = make_api_patch_call(self) - mock.return_value = Element('aaaLogin', {'outCookie': cookie}) - api.login() - self.assertEqual(cookie, api.cookie) - - def test_login_call_parameters(self): - user = 'user' - password = 'pass' - api, mock = make_api_patch_call(self, user=user, password=password) - api.login() - fields = {'inName': user, 'inPassword': password} - self.assertThat(mock, MockCalledOnceWith('aaaLogin', fields)) - - -class TestLogout(MAASTestCase): - """"Tests for ``UCSM_XML_API.logout``.""" - - def test_logout_clears_cookie(self): - api = make_api() - 
self.patch(api, '_call') - api.logout() - self.assertIsNone(api.cookie) - - def test_logout_uses_cookie(self): - api, mock = make_api_patch_call(self) - cookie = api.cookie - api.logout() - fields = {'inCookie': cookie} - self.assertThat(mock, MockCalledOnceWith('aaaLogout', fields)) - - -class TestConfigResolveClass(MAASTestCase): - """"Tests for ``UCSM_XML_API.config_resolve_class``.""" - - def test_no_filters(self): - class_id = make_class() - api, mock = make_api_patch_call(self) - api.config_resolve_class(class_id) - fields = {'cookie': api.cookie, 'classId': class_id} - self.assertThat(mock, MockCalledOnceWith('configResolveClass', fields, - ANY)) - - def test_with_filters(self): - class_id = make_class() - filter_element = Element('hi') - api, mock = make_api_patch_call(self) - api.config_resolve_class(class_id, [filter_element]) - in_filters = mock.call_args[0][2] - self.assertEqual([filter_element], in_filters[0][:]) - - def test_return_response(self): - api, mock = make_api_patch_call(self) - mock.return_value = Element('test') - result = api.config_resolve_class('c') - self.assertEqual(mock.return_value, result) - - -class TestConfigResolveChildren(MAASTestCase): - """"Tests for ``UCSM_XML_API.config_resolve_children``.""" - - def test_parameters(self): - dn = make_dn() - class_id = make_class() - api, mock = make_api_patch_call(self) - api.config_resolve_children(dn, class_id) - fields = {'inDn': dn, 'classId': class_id, 'cookie': api.cookie} - self.assertThat(mock, - MockCalledOnceWith('configResolveChildren', fields)) - - def test_no_class_id(self): - dn = make_dn() - api, mock = make_api_patch_call(self) - api.config_resolve_children(dn) - fields = {'inDn': dn, 'cookie': api.cookie} - self.assertThat(mock, - MockCalledOnceWith('configResolveChildren', fields)) - - def test_return_response(self): - api, mock = make_api_patch_call(self) - mock.return_value = Element('test') - result = api.config_resolve_children('d', 'c') - 
self.assertEqual(mock.return_value, result) - - -class TestConfigConfMo(MAASTestCase): - """"Tests for ``UCSM_XML_API.config_conf_mo``.""" - - def test_parameters(self): - dn = make_dn() - config_items = [Element('hi')] - api, mock = make_api_patch_call(self) - api.config_conf_mo(dn, config_items) - fields = {'dn': dn, 'cookie': api.cookie} - self.assertThat(mock, MockCalledOnceWith('configConfMo', fields, ANY)) - in_configs = mock.call_args[0][2] - self.assertEqual(config_items, in_configs[0][:]) - - -class TestCall(MAASTestCase): - """"Tests for ``UCSM_XML_API._call``.""" - - def test_call(self): - name = 'method' - fields = {1: 2} - children = [3, 4] - request = '' - response = Element('good') - api = make_api() - - mock_make_request_data = self.patch(ucsm, 'make_request_data') - mock_make_request_data.return_value = request - - mock_send_request = self.patch(api, '_send_request') - mock_send_request.return_value = response - - api._call(name, fields, children) - self.assertThat(mock_make_request_data, - MockCalledOnceWith(name, fields, children)) - self.assertThat(mock_send_request, MockCalledOnceWith(request)) - - -class TestSendRequest(MAASTestCase): - """"Tests for ``UCSM_XML_API._send_request``.""" - - def test_send_request(self): - request_data = 'foo' - api = make_api() - self.patch(api, '_call') - stream = StringIO('') - mock = self.patch(urllib2, 'urlopen') - mock.return_value = stream - response = api._send_request(request_data) - self.assertEqual('hi', response.tag) - urllib_request = mock.call_args[0][0] - self.assertEqual(request_data, urllib_request.data) - - -class TestConfigResolveDn(MAASTestCase): - """Tests for ``UCSM_XML_API.config_resolve_dn``.""" - - def test_parameters(self): - api, mock = make_api_patch_call(self) - test_dn = make_dn() - fields = {'cookie': api.cookie, 'dn': test_dn} - api.config_resolve_dn(test_dn) - self.assertThat(mock, - MockCalledOnceWith('configResolveDn', fields)) - - -class TestGetServers(MAASTestCase): - """Tests 
for ``get_servers``.""" - - def test_uses_uuid(self): - uuid = factory.getRandomUUID() - api = make_api() - mock = self.patch(api, 'config_resolve_class') - get_servers(api, uuid) - filters = mock.call_args[0][1] - attrib = {'class': 'computeItem', 'property': 'uuid', 'value': uuid} - self.assertEqual(attrib, filters[0].attrib) - - def test_returns_result(self): - uuid = factory.getRandomUUID() - api = make_api() - fake_result = make_fake_result('configResolveClass', 'found') - self.patch(api, 'config_resolve_class').return_value = fake_result - result = get_servers(api, uuid) - self.assertEqual('found', result[0].tag) - - def test_class_id(self): - uuid = factory.getRandomUUID() - api = make_api() - mock = self.patch(api, 'config_resolve_class') - get_servers(api, uuid) - self.assertThat(mock, MockCalledOnceWith('computeItem', ANY)) - - -class TestGetChildren(MAASTestCase): - """Tests for ``get_children``.""" - - def test_returns_result(self): - search_class = make_class() - api = make_api() - fake_result = make_fake_result('configResolveChildren', search_class) - self.patch(api, 'config_resolve_children').return_value = fake_result - in_element = Element('test', {'dn': make_dn()}) - class_id = search_class - result = get_children(api, in_element, class_id) - self.assertEqual(search_class, result[0].tag) - - def test_parameters(self): - search_class = make_class() - parent_dn = make_dn() - api = make_api() - mock = self.patch(api, 'config_resolve_children') - in_element = Element('test', {'dn': parent_dn}) - class_id = search_class - get_children(api, in_element, class_id) - self.assertThat(mock, MockCalledOnceWith(parent_dn, search_class)) - - -class TestGetMacs(MAASTestCase): - """Tests for ``get_macs``.""" - - def test_gets_adaptors(self): - adaptor = 'adaptor' - server = make_server() - mac = 'xx' - api = make_api() - mock = self.patch(ucsm, 'get_children') - - def fake_get_children(api, element, class_id): - if class_id == 'adaptorUnit': - return [adaptor] - 
elif class_id == 'adaptorHostEthIf': - return [Element('ethif', {'mac': mac})] - - mock.side_effect = fake_get_children - macs = get_macs(api, server) - self.assertThat(mock, MockCallsMatch( - call(api, server, 'adaptorUnit'), - call(api, adaptor, 'adaptorHostEthIf'))) - self.assertEqual([mac], macs) - - -class TestProbeServers(MAASTestCase): - """Tests for ``probe_servers``.""" - - def test_uses_api(self): - api = make_api() - mock = self.patch(ucsm, 'get_servers') - probe_servers(api) - self.assertThat(mock, MockCalledOnceWith(api)) - - def test_returns_results(self): - servers = [{'uuid': factory.getRandomUUID()}] - mac = 'mac' - api = make_api() - self.patch(ucsm, 'get_servers').return_value = servers - self.patch(ucsm, 'get_macs').return_value = [mac] - server_list = probe_servers(api) - self.assertEqual([(servers[0], [mac])], server_list) - - -class TestGetServerPowerControl(MAASTestCase): - """Tests for ``get_server_power_control``.""" - - def test_get_server_power_control(self): - api = make_api() - mock = self.patch(api, 'config_resolve_children') - fake_result = make_fake_result('configResolveChildren', 'lsPower') - mock.return_value = fake_result - dn = make_dn() - server = Element('computeItem', {'assignedToDn': dn}) - power_control = get_server_power_control(api, server) - self.assertThat(mock, MockCalledOnceWith(dn, 'lsPower')) - self.assertEqual('lsPower', power_control.tag) - - -class TestSetServerPowerControl(MAASTestCase): - """Tests for ``set_server_power_control``.""" - - def test_set_server_power_control(self): - api = make_api() - power_dn = make_dn() - power_control = Element('lsPower', {'dn': power_dn}) - config_conf_mo_mock = self.patch(api, 'config_conf_mo') - state = 'state' - set_server_power_control(api, power_control, state) - self.assertThat(config_conf_mo_mock, MockCalledOnceWith(power_dn, ANY)) - power_change = config_conf_mo_mock.call_args[0][1][0] - self.assertEqual(power_change.tag, 'lsPower') - self.assertEqual({'state': state, 
'dn': power_dn}, power_change.attrib) - - -class TestLoggedIn(MAASTestCase): - """Tests for ``logged_in``.""" - - def test_logged_in(self): - mock = self.patch(ucsm, 'UCSM_XML_API') - url = 'url' - username = 'username' - password = 'password' - mock.return_value = Mock() - - with logged_in(url, username, password) as api: - self.assertEqual(mock.return_value, api) - self.assertThat(api.login, MockCalledOnceWith()) - - self.assertThat(mock.return_value.logout, MockCalledOnceWith()) - - -class TestValidGetPowerCommand(MAASTestCase): - scenarios = [ - ('Power On', dict( - power_mode='on', current_state='down', command='admin-up')), - ('Power On', dict( - power_mode='on', current_state='up', command='cycle-immediate')), - ('Power Off', dict( - power_mode='off', current_state='up', command='admin-down')), - ] - - def test_get_power_command(self): - command = get_power_command(self.power_mode, self.current_state) - self.assertEqual(self.command, command) - - -class TestInvalidGetPowerCommand(MAASTestCase): - - def test_get_power_command_raises_assertion_error_on_bad_power_mode(self): - bad_power_mode = factory.make_name('unlikely') - error = self.assertRaises(AssertionError, get_power_command, - bad_power_mode, None) - self.assertIn(bad_power_mode, error.args[0]) - - -class TestPowerControlUCSM(MAASTestCase): - """Tests for ``power_control_ucsm``.""" - - def test_power_control_ucsm(self): - uuid = factory.getRandomUUID() - api = Mock() - self.patch(ucsm, 'UCSM_XML_API').return_value = api - get_servers_mock = self.patch(ucsm, 'get_servers') - server = make_server() - state = 'admin-down' - power_control = Element('lsPower', {'state': state}) - get_servers_mock.return_value = [server] - get_server_power_control_mock = self.patch(ucsm, - 'get_server_power_control') - get_server_power_control_mock.return_value = power_control - set_server_power_control_mock = self.patch(ucsm, - 'set_server_power_control') - power_control_ucsm('url', 'username', 'password', uuid, - 'off') - 
self.assertThat(get_servers_mock, MockCalledOnceWith(api, uuid)) - self.assertThat(set_server_power_control_mock, - MockCalledOnceWith(api, power_control, state)) - - -class TestProbeAndEnlistUCSM(MAASTestCase): - """Tests for ``probe_and_enlist_ucsm``.""" - - def test_probe_and_enlist(self): - url = 'url' - username = 'username' - password = 'password' - api = Mock() - self.patch(ucsm, 'UCSM_XML_API').return_value = api - server_element = {'uuid': 'uuid'} - server = (server_element, ['mac'],) - probe_servers_mock = self.patch(ucsm, 'probe_servers') - probe_servers_mock.return_value = [server] - set_lan_boot_default_mock = self.patch(ucsm, 'set_lan_boot_default') - create_node_mock = self.patch(utils, 'create_node') - probe_and_enlist_ucsm(url, username, password) - self.assertThat(set_lan_boot_default_mock, - MockCalledOnceWith(api, server_element)) - self.assertThat(probe_servers_mock, MockCalledOnceWith(api)) - params = { - 'power_address': url, - 'power_user': username, - 'power_pass': password, - 'uuid': server[0]['uuid'] - } - self.assertThat(create_node_mock, - MockCalledOnceWith(server[1], 'amd64', 'ucsm', params)) - - -class TestGetServiceProfile(MAASTestCase): - """Tests for ``get_service_profile.``""" - - def test_get_service_profile(self): - test_dn = make_dn() - server = Element('computeBlade', {'assignedToDn': test_dn}) - api = make_api() - mock = self.patch(api, 'config_resolve_dn') - mock.return_value = make_fake_result('configResolveDn', 'lsServer', - 'outConfig') - service_profile = get_service_profile(api, server) - self.assertThat(mock, MockCalledOnceWith(test_dn)) - self.assertEqual(mock.return_value[0], service_profile) - - -def make_boot_order_scenarios(size): - minimum = random.randint(0, 500) - ordinals = xrange(minimum, minimum + size) - - elements = [ - Element('Entry%d' % i, {'order': '%d' % i}) - for i in ordinals - ] - - orders = permutations(elements) - orders = [{'order': order} for order in orders] - - scenarios = [('%d' % i, order) 
for i, order in enumerate(orders)] - return scenarios, minimum - - -class TestGetFirstBooter(MAASTestCase): - """Tests for ``get_first_booter.``""" - - scenarios, minimum = make_boot_order_scenarios(3) - - def test_first_booter(self): - root = Element('outConfigs') - root.extend(self.order) - picked = get_first_booter(root) - self.assertEqual(picked.tag, 'Entry%d' % self.minimum) - - -class TestsForStripRoKeys(MAASTestCase): - """Tests for ``strip_ro_keys.``""" - - def test_strip_ro_keys(self): - attributes = {key: 'DC' for key in RO_KEYS} - - elements = [ - Element('Element%d' % i, attributes) - for i in xrange(random.randint(0, 10)) - ] - - strip_ro_keys(elements) - - for key in RO_KEYS: - values = [element.get(key) for element in elements] - for value in values: - self.assertIsNone(value) - - -class TestMakePolicyChange(MAASTestCase): - """Tests for ``make_policy_change``.""" - - def test_lan_already_top_priority(self): - boot_profile_response = make_fake_result('configResolveChildren', - 'lsbootLan') - mock = self.patch(ucsm, 'get_first_booter') - mock.return_value = boot_profile_response[0] - change = make_policy_change(boot_profile_response) - self.assertIsNone(change) - self.assertThat(mock, MockCalledOnceWith(boot_profile_response)) - - def test_change_lan_to_top_priority(self): - boot_profile_response = Element('outConfigs') - lan_boot = Element('lsbootLan', {'order': 'second'}) - storage_boot = Element('lsbootStorage', {'order': 'first'}) - boot_profile_response.extend([lan_boot, storage_boot]) - self.patch(ucsm, 'get_first_booter').return_value = storage_boot - self.patch(ucsm, 'strip_ro_keys') - change = make_policy_change(boot_profile_response) - lan_boot_order = change.xpath('//lsbootPolicy/lsbootLan/@order') - storage_boot_order = \ - change.xpath('//lsbootPolicy/lsbootStorage/@order') - self.assertEqual(['first'], lan_boot_order) - self.assertEqual(['second'], storage_boot_order) - - -class TestSetLanBootDefault(MAASTestCase): - """Tets for 
``set_lan_boot_default.``""" - - def test_no_change(self): - api = make_api() - server = make_server() - self.patch(ucsm, 'get_service_profile') - self.patch(api, 'config_resolve_children') - self.patch(ucsm, 'make_policy_change').return_value = None - config_conf_mo = self.patch(api, 'config_conf_mo') - set_lan_boot_default(api, server) - self.assertThat(config_conf_mo, MockNotCalled()) - - def test_with_change(self): - api = make_api() - server = make_server() - test_dn = make_dn() - test_change = 'change' - service_profile = Element('test', {'operBootPolicyName': test_dn}) - self.patch(ucsm, 'get_service_profile').return_value = service_profile - self.patch(api, 'config_resolve_children') - self.patch(ucsm, 'make_policy_change').return_value = test_change - config_conf_mo = self.patch(api, 'config_conf_mo') - set_lan_boot_default(api, server) - self.assertThat(config_conf_mo, - MockCalledOnceWith(test_dn, [test_change])) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/custom_hardware/tests/test_virsh.py maas-1.7.6+bzr3376/src/provisioningserver/custom_hardware/tests/test_virsh.py --- maas-1.5.4+bzr2294/src/provisioningserver/custom_hardware/tests/test_virsh.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/custom_hardware/tests/test_virsh.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,351 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for `provisioningserver.custom_hardware.virsh`. 
-""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import random -from textwrap import dedent - -from maastesting.factory import factory -from maastesting.matchers import ( - MockCalledOnceWith, - MockCallsMatch, - ) -from maastesting.testcase import MAASTestCase -from mock import call -from provisioningserver.custom_hardware import ( - utils, - virsh, - ) - - -SAMPLE_IFLIST = dedent(""" - Interface Type Source Model MAC - ------------------------------------------------------- - - bridge br0 e1000 %s - - bridge br1 e1000 %s - """) - -SAMPLE_DUMPXML = dedent(""" - - test - 4096576 - 4096576 - 1 - - hvm - - - - """) - - -class TestVirshSSH(MAASTestCase): - """Tests for `VirshSSH`.""" - - def configure_virshssh_pexpect(self, inputs=None): - """Configures the VirshSSH class to use 'cat' process - for testing instead of the actual virsh.""" - conn = virsh.VirshSSH(timeout=1) - self.addCleanup(conn.close) - self.patch(conn, '_execute') - conn._spawn('cat') - if inputs is not None: - for line in inputs: - conn.sendline(line) - return conn - - def configure_virshssh(self, output): - self.patch(virsh.VirshSSH, 'run').return_value = output - return virsh.VirshSSH() - - def test_login_prompt(self): - virsh_outputs = [ - 'virsh # ' - ] - conn = self.configure_virshssh_pexpect(virsh_outputs) - self.assertTrue(conn.login(poweraddr=None)) - - def test_login_with_sshkey(self): - virsh_outputs = [ - "The authenticity of host '127.0.0.1' can't be established.", - "ECDSA key fingerprint is " - "00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff.", - "Are you sure you want to continue connecting (yes/no)? 
", - ] - conn = self.configure_virshssh_pexpect(virsh_outputs) - mock_sendline = self.patch(conn, 'sendline') - conn.login(poweraddr=None) - self.assertThat(mock_sendline, MockCalledOnceWith('yes')) - - def test_login_with_password(self): - virsh_outputs = [ - "ubuntu@%s's password: " % factory.getRandomIPAddress(), - ] - conn = self.configure_virshssh_pexpect(virsh_outputs) - fake_password = factory.make_name('password') - mock_sendline = self.patch(conn, 'sendline') - conn.login(poweraddr=None, password=fake_password) - self.assertThat(mock_sendline, MockCalledOnceWith(fake_password)) - - def test_login_missing_password(self): - virsh_outputs = [ - "ubuntu@%s's password: " % factory.getRandomIPAddress(), - ] - conn = self.configure_virshssh_pexpect(virsh_outputs) - mock_close = self.patch(conn, 'close') - self.assertFalse(conn.login(poweraddr=None, password=None)) - mock_close.assert_called() - - def test_login_invalid(self): - virsh_outputs = [ - factory.getRandomString(), - ] - conn = self.configure_virshssh_pexpect(virsh_outputs) - mock_close = self.patch(conn, 'close') - self.assertFalse(conn.login(poweraddr=None)) - mock_close.assert_called() - - def test_logout(self): - conn = self.configure_virshssh_pexpect() - mock_sendline = self.patch(conn, 'sendline') - mock_close = self.patch(conn, 'close') - conn.logout() - self.assertThat(mock_sendline, MockCalledOnceWith('quit')) - mock_close.assert_called() - - def test_prompt(self): - virsh_outputs = [ - 'virsh # ' - ] - conn = self.configure_virshssh_pexpect(virsh_outputs) - self.assertTrue(conn.prompt()) - - def test_invalid_prompt(self): - virsh_outputs = [ - factory.getRandomString() - ] - conn = self.configure_virshssh_pexpect(virsh_outputs) - self.assertFalse(conn.prompt()) - - def test_run(self): - cmd = ['list', '--all', '--name'] - expected = ' '.join(cmd) - names = [factory.make_name('machine') for _ in range(3)] - conn = self.configure_virshssh_pexpect() - conn.before = '\n'.join([expected] + names) - 
mock_sendline = self.patch(conn, 'sendline') - mock_prompt = self.patch(conn, 'prompt') - output = conn.run(cmd) - self.assertThat(mock_sendline, MockCalledOnceWith(expected)) - mock_prompt.assert_called() - self.assertEqual('\n'.join(names), output) - - def test_list(self): - names = [factory.make_name('machine') for _ in range(3)] - conn = self.configure_virshssh('\n'.join(names)) - expected = conn.list() - self.assertItemsEqual(names, expected) - - def test_get_state(self): - state = factory.make_name('state') - conn = self.configure_virshssh(state) - expected = conn.get_state('') - self.assertEqual(state, expected) - - def test_get_state_error(self): - conn = self.configure_virshssh('error') - expected = conn.get_state('') - self.assertEqual(None, expected) - - def test_mac_addresses_returns_list(self): - macs = [factory.getRandomMACAddress() for _ in range(2)] - output = SAMPLE_IFLIST % (macs[0], macs[1]) - conn = self.configure_virshssh(output) - expected = conn.get_mac_addresses('') - for i in range(2): - self.assertEqual(macs[i], expected[i]) - - def test_get_arch_returns_valid(self): - arch = factory.make_name('arch') - output = SAMPLE_DUMPXML % arch - conn = self.configure_virshssh(output) - expected = conn.get_arch('') - self.assertEqual(arch, expected) - - def test_get_arch_returns_valid_fixed(self): - arch = random.choice(virsh.ARCH_FIX.keys()) - fixed_arch = virsh.ARCH_FIX[arch] - output = SAMPLE_DUMPXML % arch - conn = self.configure_virshssh(output) - expected = conn.get_arch('') - self.assertEqual(fixed_arch, expected) - - -class TestVirsh(MAASTestCase): - """Tests for `probe_virsh_and_enlist`.""" - - def test_probe_and_enlist(self): - # Patch VirshSSH list so that some machines are returned - # with some fake architectures. 
- machines = [factory.make_name('machine') for _ in range(3)] - self.patch(virsh.VirshSSH, 'list').return_value = machines - fake_arch = factory.make_name('arch') - mock_arch = self.patch(virsh.VirshSSH, 'get_arch') - mock_arch.return_value = fake_arch - - # Patch get_state so that one of the machines is on, so we - # can check that it will be forced off. - fake_states = [ - virsh.VirshVMState.ON, - virsh.VirshVMState.OFF, - virsh.VirshVMState.OFF - ] - mock_state = self.patch(virsh.VirshSSH, 'get_state') - mock_state.side_effect = fake_states - - # Setup the power parameters that we should expect to be - # the output of the probe_and_enlist - fake_password = factory.getRandomString() - poweraddr = factory.make_name('poweraddr') - called_params = [] - fake_macs = [] - for machine in machines: - macs = [factory.getRandomMACAddress() for _ in range(3)] - fake_macs.append(macs) - called_params.append({ - 'power_address': poweraddr, - 'power_id': machine, - 'power_pass': fake_password, - }) - - # Patch the get_mac_addresses so we get a known list of - # mac addresses for each machine. - mock_macs = self.patch(virsh.VirshSSH, 'get_mac_addresses') - mock_macs.side_effect = fake_macs - - # Patch the poweroff and create as we really don't want these - # actions to occur, but want to also check that they are called. - mock_poweroff = self.patch(virsh.VirshSSH, 'poweroff') - mock_create = self.patch(utils, 'create_node') - - # Patch login and logout so that we don't really contact - # a server at the fake poweraddr - mock_login = self.patch(virsh.VirshSSH, 'login') - mock_login.return_value = True - mock_logout = self.patch(virsh.VirshSSH, 'logout') - - # Perform the probe and enlist - virsh.probe_virsh_and_enlist(poweraddr, password=fake_password) - - # Check that login was called with the provided poweraddr and - # password. 
- self.assertThat( - mock_login, MockCalledOnceWith(poweraddr, fake_password)) - - # The first machine should have poweroff called on it, as it - # was initial in the on state. - self.assertThat( - mock_poweroff, MockCalledOnceWith(machines[0])) - - # Check that the create command had the correct parameters for - # each machine. - self.assertThat( - mock_create, MockCallsMatch( - call(fake_macs[0], fake_arch, 'virsh', called_params[0]), - call(fake_macs[1], fake_arch, 'virsh', called_params[1]), - call(fake_macs[2], fake_arch, 'virsh', called_params[2]))) - mock_logout.assert_called() - - def test_probe_and_enlist_login_failure(self): - mock_login = self.patch(virsh.VirshSSH, 'login') - mock_login.return_value = False - self.assertRaises( - virsh.VirshError, virsh.probe_virsh_and_enlist, - factory.make_name('poweraddr'), password=factory.getRandomString()) - - -class TestVirshPowerControl(MAASTestCase): - """Tests for `power_control_virsh`.""" - - def test_power_control_login_failure(self): - mock_login = self.patch(virsh.VirshSSH, 'login') - mock_login.return_value = False - self.assertRaises( - virsh.VirshError, virsh.power_control_virsh, - factory.make_name('poweraddr'), factory.make_name('machine'), - 'on', password=factory.getRandomString()) - - def test_power_control_on(self): - mock_login = self.patch(virsh.VirshSSH, 'login') - mock_login.return_value = True - mock_state = self.patch(virsh.VirshSSH, 'get_state') - mock_state.return_value = virsh.VirshVMState.OFF - mock_poweron = self.patch(virsh.VirshSSH, 'poweron') - - poweraddr = factory.make_name('poweraddr') - machine = factory.make_name('machine') - virsh.power_control_virsh(poweraddr, machine, 'on') - - self.assertThat( - mock_login, MockCalledOnceWith(poweraddr, None)) - self.assertThat( - mock_state, MockCalledOnceWith(machine)) - self.assertThat( - mock_poweron, MockCalledOnceWith(machine)) - - def test_power_control_off(self): - mock_login = self.patch(virsh.VirshSSH, 'login') - 
mock_login.return_value = True - mock_state = self.patch(virsh.VirshSSH, 'get_state') - mock_state.return_value = virsh.VirshVMState.ON - mock_poweroff = self.patch(virsh.VirshSSH, 'poweroff') - - poweraddr = factory.make_name('poweraddr') - machine = factory.make_name('machine') - virsh.power_control_virsh(poweraddr, machine, 'off') - - self.assertThat( - mock_login, MockCalledOnceWith(poweraddr, None)) - self.assertThat( - mock_state, MockCalledOnceWith(machine)) - self.assertThat( - mock_poweroff, MockCalledOnceWith(machine)) - - def test_power_control_bad_domain(self): - mock_login = self.patch(virsh.VirshSSH, 'login') - mock_login.return_value = True - mock_state = self.patch(virsh.VirshSSH, 'get_state') - mock_state.return_value = None - - poweraddr = factory.make_name('poweraddr') - machine = factory.make_name('machine') - self.assertRaises( - virsh.VirshError, virsh.power_control_virsh, - poweraddr, machine, 'on') - - def test_power_control_power_failure(self): - mock_login = self.patch(virsh.VirshSSH, 'login') - mock_login.return_value = True - mock_state = self.patch(virsh.VirshSSH, 'get_state') - mock_state.return_value = virsh.VirshVMState.ON - mock_poweroff = self.patch(virsh.VirshSSH, 'poweroff') - mock_poweroff.return_value = False - - poweraddr = factory.make_name('poweraddr') - machine = factory.make_name('machine') - self.assertRaises( - virsh.VirshError, virsh.power_control_virsh, - poweraddr, machine, 'off') diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/custom_hardware/ucsm.py maas-1.7.6+bzr3376/src/provisioningserver/custom_hardware/ucsm.py --- maas-1.5.4+bzr2294/src/provisioningserver/custom_hardware/ucsm.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/custom_hardware/ucsm.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,435 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Support for managing nodes via Cisco UCS Manager's HTTP-XML API. - -It's useful to have a cursory understanding of how UCS Manager XML API -works. Cisco has a proprietary document that describes all of this in -more detail, and I would suggest you get a copy of that if you want more -information than is provided here. - -The Cisco DevNet website for UCS Manager has a link to the document, -which is behind a login wall, and links to example UCS queries: - -https://developer.cisco.com/web/unifiedcomputing/home - -UCS Manager is a tool for managing servers. It provides an XML API for -external applications to use to interact with UCS Manager to manage -servers. The API is available via HTTP, and requests and responses are -made of XML strings. MAAS's code for interacting with a UCS Manager is -concerned with building these requests, sending them to UCS Manager, and -processing the responses. - -UCS Manager stores information in a hierarchical structure known as the -management information tree. This structure is exposed via the XML API, -where we can manipulate objects in the tree by finding them, reading -them, and writing them. - -Some definitions for terms that are used in this code: - -Boot Policy - Controls the boot order for a server. Each service profile -is associated with a boot policy. - -Distinguished Name (DN) - Each object in UCS has a unique DN, which -describes its position in the tree. This is like a fully qualified path, -and provides a way for objects to reference other objects at other -places in the tree, or for API users to look up specific objects in the -tree. - -Class - Classes define the properties and states of objects. An object's -class is given in its tag name. - -Managed Object (MO) - An object in the management information tree. -Objects are recursive, and may have children of multiple types. With the -exception of the root object, all objects have parents. In the XML API, -objects are represented as XML elements. 
- -Method - Actions performed by the API on managed objects. These can -change state, or read the current state, or both. - -Server - A physical server managed by UCS Manager. Servers must be -associated with service profiles in order to be used. - -Service Profile - A set of configuration options for a server. Service -profiles define the server's personality, and can be migrated from -server to server. Service profiles describe boot policy, MAC addresses, -network connectivity, IPMI configuration, and more. MAAS requires -servers to be associated with service profiles. - -UUID - The UUID for a server. MAAS persists the UUID of each UCS managed -server it enlists, and uses it as a key for looking the server up later. -""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -import contextlib -import urllib2 -import urlparse - -from lxml.etree import ( - Element, - tostring, - XML, - ) -import provisioningserver.custom_hardware.utils as utils - - -str = None - -__metaclass__ = type -__all__ = [ - 'power_control_ucsm', - 'probe_and_enlist_ucsm', -] - - -class UCSM_XML_API_Error(Exception): - """Failure talking to a Cisco UCS Manager.""" - - def __init__(self, msg, code): - super(UCSM_XML_API_Error, self).__init__(msg) - self.code = code - - -def make_request_data(name, fields=None, children=None): - """Build a request string for an API method.""" - root = Element(name, fields) - if children is not None: - root.extend(children) - return tostring(root) - - -def parse_response(response_string): - """Parse the response from an API method.""" - doc = XML(response_string) - - error_code = doc.get('errorCode') - if error_code is not None: - raise UCSM_XML_API_Error(doc.get('errorDescr'), error_code) - - return doc - - -class UCSM_XML_API(object): - """Provides access to a Cisco UCS Manager's XML API. Public methods - on this class correspond to UCS Manager XML API methods. - - Each request uses a new connection. 
The server supports keep-alive, - so this client could be optimized to use it too. - """ - - def __init__(self, url, username, password): - self.url = url - self.api_url = urlparse.urljoin(self.url, 'nuova') - self.username = username - self.password = password - self.cookie = None - - def _send_request(self, request_data): - """Issue a request via HTTP and parse the response.""" - request = urllib2.Request(self.api_url, request_data) - response = urllib2.urlopen(request) - response_text = response.read() - response_doc = parse_response(response_text) - return response_doc - - def _call(self, name, fields=None, children=None): - request_data = make_request_data(name, fields, children) - response = self._send_request(request_data) - return response - - def login(self): - """Login to the API and get a cookie. - - Logging into the API gives a new cookie in response. The cookie - will become inactive after it has been inactive for some amount - of time (10 minutes is the default.) - - UCS Manager allows a limited number of active cookies at any - point in time, so it's important to free the cookie up when - finished by logging out via the ``logout`` method. - """ - fields = {'inName': self.username, 'inPassword': self.password} - response = self._call('aaaLogin', fields) - self.cookie = response.get('outCookie') - - def logout(self): - """Logout from the API and free the cookie.""" - fields = {'inCookie': self.cookie} - self._call('aaaLogout', fields) - self.cookie = None - - def config_resolve_class(self, class_id, filters=None): - """Issue a configResolveClass request. - - This returns all of the objects of class ``class_id`` from the - UCS Manager. - - Filters provide a way of limiting the classes returned according - to their attributes. There are a number of filters available - - Cisco's XML API documentation has a full chapter on filters. - All we care about here is that filters are described with XML - elements. 
- """ - fields = {'cookie': self.cookie, 'classId': class_id} - - in_filters = Element('inFilter') - if filters: - in_filters.extend(filters) - - return self._call('configResolveClass', fields, [in_filters]) - - def config_resolve_children(self, dn, class_id=None): - """Issue a configResolveChildren request. - - This returns all of the children of the object named by ``dn``, - or if ``class_id`` is not None, all of the children of type - ``class_id``. - """ - fields = {'cookie': self.cookie, 'inDn': dn} - if class_id is not None: - fields['classId'] = class_id - return self._call('configResolveChildren', fields) - - def config_resolve_dn(self, dn): - """Retrieve a single object by name. - - This returns the object named by ``dn``, but not its children. - """ - fields = {'cookie': self.cookie, 'dn': dn} - return self._call('configResolveDn', fields) - - def config_conf_mo(self, dn, config_items): - """Issue a configConfMo request. - - This makes a configuration change on an object (MO). - """ - fields = {'cookie': self.cookie, 'dn': dn} - - in_configs = Element('inConfig') - in_configs.extend(config_items) - - self._call('configConfMo', fields, [in_configs]) - - -def get_servers(api, uuid=None): - """Retrieve a list of servers from the UCS Manager.""" - if uuid: - attrs = {'class': 'computeItem', 'property': 'uuid', 'value': uuid} - filters = [Element('eq', attrs)] - else: - filters = None - - resolved = api.config_resolve_class('computeItem', filters) - return resolved.xpath('//outConfigs/*') - - -def get_children(api, element, class_id): - """Retrieve a list of child elements from the UCS Manager.""" - resolved = api.config_resolve_children(element.get('dn'), class_id) - return resolved.xpath('//outConfigs/%s' % class_id) - - -def get_macs(api, server): - """Retrieve the list of MAC addresses assigned to a server. - - Network interfaces are represented by 'adaptorUnit' objects, and - are stored as children of servers. 
- """ - adaptors = get_children(api, server, 'adaptorUnit') - - macs = [] - for adaptor in adaptors: - host_eth_ifs = get_children(api, adaptor, 'adaptorHostEthIf') - macs.extend([h.get('mac') for h in host_eth_ifs]) - - return macs - - -def probe_servers(api): - """Retrieve the UUID and MAC addresses for servers from the UCS Manager.""" - servers = get_servers(api) - server_list = [(s, get_macs(api, s)) for s in servers] - return server_list - - -def get_server_power_control(api, server): - """Retrieve the power control object for a server.""" - service_profile_dn = server.get('assignedToDn') - resolved = api.config_resolve_children(service_profile_dn, 'lsPower') - power_controls = resolved.xpath('//outConfigs/lsPower') - return power_controls[0] - - -def set_server_power_control(api, power_control, command): - """Issue a power command to a server's power control.""" - attrs = {'state': command, 'dn': power_control.get('dn')} - power_change = Element('lsPower', attrs) - api.config_conf_mo(power_control.get('dn'), [power_change]) - - -def get_service_profile(api, server): - """Get the server's assigned service profile.""" - service_profile_dn = server.get('assignedToDn') - result = api.config_resolve_dn(service_profile_dn) - service_profile = result.xpath('//outConfig/lsServer')[0] - return service_profile - - -def get_first_booter(boot_profile_response): - """Find the device currently set to boot by default.""" - ordinals = boot_profile_response.xpath('//outConfigs/*/@order') - top_boot_order = min(ordinals) - first_query = '//outConfigs/*[@order=%s]' % top_boot_order - current_first = boot_profile_response.xpath(first_query)[0] - return current_first - - -RO_KEYS = ['access', 'type'] - - -def strip_ro_keys(elements): - """Remove read-only keys from configuration elements. - - These are keys for attributes that aren't allowed to be changed via - configConfMo request. 
They are included in MO's that we read from the - API; stripping these attributes lets us reuse the elements for those - MO's rather than building new ones from scratch. - """ - for ro_key in RO_KEYS: - for element in elements: - del(element.attrib[ro_key]) - - -def make_policy_change(boot_profile_response): - """Build the policy change tree required to make LAN boot first - priority. - - The original top priority will be swapped with LAN boot's original - priority. - """ - current_first = get_first_booter(boot_profile_response) - lan_boot = boot_profile_response.xpath('//outConfigs/lsbootLan')[0] - - if current_first == lan_boot: - return - - top_boot_order = current_first.get('order') - current_first.set('order', lan_boot.get('order')) - lan_boot.set('order', top_boot_order) - - elements = [current_first, lan_boot] - strip_ro_keys(elements) - policy_change = Element('lsbootPolicy') - policy_change.extend(elements) - return policy_change - - -def set_lan_boot_default(api, server): - """Set a server to boot via LAN by default. - - If LAN boot is already the top priority, no change will - be made. - - This command changes the server's boot profile, which will affect - any other servers also using that boot profile. This is ok, because - probe and enlist enlists all the servers in the chassis. - """ - service_profile = get_service_profile(api, server) - boot_profile_dn = service_profile.get('operBootPolicyName') - response = api.config_resolve_children(boot_profile_dn) - policy_change = make_policy_change(response) - if policy_change is None: - return - api.config_conf_mo(boot_profile_dn, [policy_change]) - - -@contextlib.contextmanager -def logged_in(url, username, password): - """Context manager that ensures the logout from the API occurs.""" - api = UCSM_XML_API(url, username, password) - api.login() - try: - yield api - finally: - api.logout() - - -def get_power_command(maas_power_mode, current_state): - """Translate a MAAS on/off state into a UCSM power command. 
- - If the node is up already and receives a request to power on, power - cycle the node. - """ - if maas_power_mode == 'on': - if current_state == 'up': - return 'cycle-immediate' - return 'admin-up' - elif maas_power_mode == 'off': - return 'admin-down' - else: - message = 'Unexpected maas power mode: %s' % (maas_power_mode) - raise AssertionError(message) - - -def power_control_ucsm(url, username, password, uuid, maas_power_mode): - """Handle calls from the power template for nodes with a power type - of 'ucsm'. - """ - with logged_in(url, username, password) as api: - # UUIDs are unique per server, so we get either one or zero - # servers for a given UUID. - [server] = get_servers(api, uuid) - power_control = get_server_power_control(api, server) - command = get_power_command(maas_power_mode, - power_control.get('state')) - set_server_power_control(api, power_control, command) - - -def probe_and_enlist_ucsm(url, username, password): - """Probe a UCS Manager and enlist all its servers. - - Here's what happens here: 1. Get a list of servers from the UCS - Manager, along with their MAC addresses. - - 2. Configure each server to boot from LAN first. - - 3. Add each server to MAAS as a new node, with a power control - method of 'ucsm'. The URL and credentials supplied are persisted - with each node so MAAS knows how to access UCSM to manage the node - in the future. - - This code expects each server in the system to have already been - associated with a service profile. The servers must have networking - configured, and their boot profiles must include a boot from LAN - option. During enlistment, the boot profile for each service profile - used by a server will be modified to move LAN boot to the highest - priority boot option. - - Also, if any node fails to enlist, this enlistment process will - stop and won't attempt to enlist any additional nodes. If a node is - already known to MAAS, it will fail to enlist, so all nodes must be - added at once. 
- - There is also room for optimization during enlistment. While our - client deals with a single server at a time, the API is capable - of reading/writing the settings of multiple servers in the same - request. - """ - with logged_in(url, username, password) as api: - servers = probe_servers(api) - for server, _ in servers: - set_lan_boot_default(api, server) - - for server, macs in servers: - params = { - 'power_address': url, - 'power_user': username, - 'power_pass': password, - 'uuid': server.get('uuid'), - } - utils.create_node(macs, 'amd64', 'ucsm', params) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/custom_hardware/utils.py maas-1.7.6+bzr3376/src/provisioningserver/custom_hardware/utils.py --- maas-1.5.4+bzr2294/src/provisioningserver/custom_hardware/utils.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/custom_hardware/utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,48 +0,0 @@ -# Copyright 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type - -from logging import getLogger - -from apiclient.maas_client import ( - MAASClient, - MAASDispatcher, - MAASOAuth, - ) -from provisioningserver.auth import get_recorded_api_credentials -from provisioningserver.cluster_config import get_maas_url -import simplejson as json - - -logger = getLogger(__name__) - - -def create_node(mac, arch, power_type, power_parameters): - api_credentials = get_recorded_api_credentials() - if api_credentials is None: - raise Exception('Not creating node: no API key yet.') - client = MAASClient( - MAASOAuth(*api_credentials), MAASDispatcher(), - get_maas_url()) - - data = { - 'architecture': arch, - 'power_type': power_type, - 'power_parameters': json.dumps(power_parameters), - 'mac_addresses': mac, - 'autodetect_nodegroup': 'true' - } - return client.post('/api/1.0/nodes/', 'new', **data) - - -def escape_string(data): - return repr(data).decode("ascii") diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/custom_hardware/virsh.py maas-1.7.6+bzr3376/src/provisioningserver/custom_hardware/virsh.py --- maas-1.5.4+bzr2294/src/provisioningserver/custom_hardware/virsh.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/custom_hardware/virsh.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,223 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - 'probe_virsh_and_enlist', - ] - -from lxml import etree -import pexpect -import provisioningserver.custom_hardware.utils as utils - - -XPATH_ARCH = "/domain/os/type/@arch" - -# Virsh stores the architecture with a different -# label then MAAS. This maps virsh architecture to -# MAAS architecture. 
-ARCH_FIX = { - 'x86_64': 'amd64', - 'ppc64': 'ppc64el', - } - - -class VirshVMState: - OFF = "shut off" - ON = "running" - - -class VirshError(Exception): - """Failure communicating to virsh. """ - - -class VirshSSH(pexpect.spawn): - - PROMPT = r"virsh \#" - PROMPT_SSHKEY = "(?i)are you sure you want to continue connecting" - PROMPT_PASSWORD = "(?i)(?:password)|(?:passphrase for key)" - PROMPT_DENIED = "(?i)permission denied" - PROMPT_CLOSED = "(?i)connection closed by remote host" - - PROMPTS = [ - PROMPT_SSHKEY, - PROMPT_PASSWORD, - PROMPT, - PROMPT_DENIED, - PROMPT_CLOSED, - pexpect.TIMEOUT, - pexpect.EOF, - ] - - I_PROMPT = PROMPTS.index(PROMPT) - I_PROMPT_SSHKEY = PROMPTS.index(PROMPT_SSHKEY) - I_PROMPT_PASSWORD = PROMPTS.index(PROMPT_PASSWORD) - - def __init__(self, timeout=30, maxread=2000): - super(VirshSSH, self).__init__( - None, timeout=timeout, maxread=maxread) - self.name = '' - - def _execute(self, poweraddr): - """Spawns the pexpect command.""" - cmd = 'virsh --connect %s' % poweraddr - self._spawn(cmd) - - def login(self, poweraddr, password=None): - """Starts connection to virsh.""" - self._execute(poweraddr) - i = self.expect(self.PROMPTS, timeout=10) - if i == self.I_PROMPT_SSHKEY: - # New certificate, lets always accept but if - # it changes it will fail to login. - self.sendline("yes") - i = self.expect(self.PROMPTS) - elif i == self.I_PROMPT_PASSWORD: - # Requesting password, give it if available. - if password is None: - self.close() - return False - self.sendline(password) - i = self.expect(self.PROMPTS) - - if i != self.I_PROMPT: - # Something bad happened, either disconnect, - # timeout, wrong password. 
- self.close() - return False - return True - - def logout(self): - """Quits the virsh session.""" - self.sendline("quit") - self.close() - - def prompt(self, timeout=None): - """Waits for virsh prompt.""" - if timeout is None: - timeout = self.timeout - i = self.expect([self.PROMPT, pexpect.TIMEOUT], timeout=timeout) - if i == 1: - return False - return True - - def run(self, args): - cmd = ' '.join(args) - self.sendline(cmd) - self.prompt() - result = self.before.splitlines() - return '\n'.join(result[1:]) - - def list(self): - """Lists all virtual machines by name.""" - machines = self.run(['list', '--all', '--name']) - return machines.strip().splitlines() - - def get_state(self, machine): - """Gets the virtual machine state.""" - state = self.run(['domstate', machine]) - state = state.strip() - if 'error' in state: - return None - return state - - def get_mac_addresses(self, machine): - """Gets list of mac addressess assigned to the virtual machine.""" - output = self.run(['domiflist', machine]).strip() - if 'error' in output: - return None - output = output.splitlines()[2:] - return [line.split()[4] for line in output] - - def get_arch(self, machine): - """Gets the virtual machine architecture.""" - output = self.run(['dumpxml', machine]).strip() - if 'error' in output: - return None - - doc = etree.XML(output) - evaluator = etree.XPathEvaluator(doc) - arch = evaluator(XPATH_ARCH)[0] - - # Fix architectures that need to be referenced by a different - # name, that MAAS understands. 
- return ARCH_FIX.get(arch, arch) - - def poweron(self, machine): - """Poweron a virtual machine.""" - output = self.run(['start', machine]).strip() - if 'error' in output: - return False - return True - - def poweroff(self, machine): - """Poweroff a virtual machine.""" - output = self.run(['destroy', machine]).strip() - if 'error' in output: - return False - return True - - -def probe_virsh_and_enlist(poweraddr, password=None): - """Extracts all of the virtual machines from virsh and enlists them - into MAAS. - - :param poweraddr: virsh connection string - """ - conn = VirshSSH() - if not conn.login(poweraddr, password): - raise VirshError('Failed to login to virsh console.') - - for machine in conn.list(): - arch = conn.get_arch(machine) - state = conn.get_state(machine) - macs = conn.get_mac_addresses(machine) - - # Force the machine off, as MAAS will control the machine - # and it needs to be in a known state of off. - if state == VirshVMState.ON: - conn.poweroff(machine) - - params = { - 'power_address': poweraddr, - 'power_id': machine, - } - if password is not None: - params['power_pass'] = password - utils.create_node(macs, arch, 'virsh', params) - - conn.logout() - - -def power_control_virsh(poweraddr, machine, power_change, password=None): - """Powers controls a virtual machine using virsh.""" - - # Force password to None if blank, as the power control - # script will send a blank password if one is not set. 
- if password == '': - password = None - - conn = VirshSSH() - if not conn.login(poweraddr, password): - raise VirshError('Failed to login to virsh console.') - - state = conn.get_state(machine) - if state is None: - raise VirshError('Failed to get domain: %s' % machine) - - if state == VirshVMState.OFF: - if power_change == 'on': - if conn.poweron(machine) is False: - raise VirshError('Failed to power on domain: %s' % machine) - elif state == VirshVMState.ON: - if power_change == 'off': - if conn.poweroff(machine) is False: - raise VirshError('Failed to power off domain: %s' % machine) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dhcp/config.py maas-1.7.6+bzr3376/src/provisioningserver/dhcp/config.py --- maas-1.5.4+bzr2294/src/provisioningserver/dhcp/config.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dhcp/config.py 2015-07-10 01:27:14.000000000 +0000 @@ -60,7 +60,7 @@ output = "" behaviour = chain(["if"], repeat("elsif")) for name, method in BootMethodRegistry: - if name != "pxe": + if name != "pxe" and method.arch_octet is not None: output += tempita.sub( CONDITIONAL_BOOTLOADER, behaviour=next(behaviour), @@ -83,9 +83,15 @@ return output.strip() -def get_config(**params): - """Return a DHCP config file based on the supplied parameters.""" - template_file = locate_config(TEMPLATES_DIR, 'dhcpd.conf.template') +def get_config(template_name, **params): + """Return a DHCP config file based on the supplied parameters. + + :param template_name: Template file name: `dhcpd.conf.template` for the + IPv4 template, `dhcpd6.conf.template` for the IPv6 template. + :param **params: Variables to be substituted into the template. + :return: A full configuration, as unicode text. 
+ """ + template_file = locate_config(TEMPLATES_DIR, template_name) params['bootloader'] = compose_conditional_bootloader() params['platform_codename'] = linux_distribution()[2] params.setdefault("ntp_server") diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dhcp/control.py maas-1.7.6+bzr3376/src/provisioningserver/dhcp/control.py --- maas-1.5.4+bzr2294/src/provisioningserver/dhcp/control.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dhcp/control.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,101 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Control the MAAS DHCP daemons.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'restart_dhcpv4', + 'restart_dhcpv6', + 'stop_dhcpv4', + 'stop_dhcpv6', + ] + +from provisioningserver.utils.shell import ( + call_and_check, + ExternalProcessError, + ) + + +def call_service_script(ip_version, subcommand): + """Issue a subcommand to one of the DHCP daemon services. + + Shells out using `sudo`, and with the `C` locale. + + :raise ExternalProcessError: if the restart command fails. + """ + service_names = { + 4: 'maas-dhcpd', + 6: 'maas-dhcpd6', + } + name = service_names[ip_version] + call_and_check( + ['sudo', '-n', 'service', name, subcommand], + env={'LC_ALL': 'C'}) + + +def restart_dhcpv4(): + """Restart the (IPv4) DHCP daemon. + + Shells out using `sudo`, and with the `C` locale. + + :raise ExternalProcessError: if the restart command fails. + """ + call_service_script(4, 'restart') + + +def restart_dhcpv6(): + """Restart the DHCPv6 daemon. + + Shells out using `sudo`, and with the `C` locale. + + :raise ExternalProcessError: if the restart command fails. 
+ """ + call_service_script(6, 'restart') + + +def stop_dhcp_server(ip_version): + """Stop a DHCP daemon, but treat "not running" as success. + + Upstart reports an attempt to stop a service while it isn't running as an + error. We don't want that. Other errors are still propagated as normal. + """ + # This relies on the C locale being used: it looks for the specific error + # message we get in the situation where the DHCP server was not running. + try: + call_service_script(ip_version, 'stop') + except ExternalProcessError as e: + if e.returncode == 1 and e.output.strip() == "stop: Unknown instance:": + # The server wasn't running. This is success. + pass + else: + # Other error. This is still failure. + raise + + +def stop_dhcpv4(): + """Stop the (IPv4) DHCP daemon. + + Shells out using `sudo`, and with the `C` locale. + + :raise ExternalProcessError: if the restart command fails. + """ + stop_dhcp_server(4) + + +def stop_dhcpv6(): + """Stop the DHCPv6 daemon. + + Shells out using `sudo`, and with the `C` locale. + + :raise ExternalProcessError: if the restart command fails. 
+ """ + stop_dhcp_server(6) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dhcp/detect.py maas-1.7.6+bzr3376/src/provisioningserver/dhcp/detect.py --- maas-1.5.4+bzr2294/src/provisioningserver/dhcp/detect.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dhcp/detect.py 2015-07-10 01:27:14.000000000 +0000 @@ -12,7 +12,9 @@ str = None __metaclass__ = type -__all__ = [] +__all__ = [ + 'probe_dhcp', + ] from contextlib import contextmanager @@ -20,7 +22,6 @@ import fcntl import httplib import json -from logging import getLogger from random import randint import socket import struct @@ -34,14 +35,10 @@ MAASDispatcher, MAASOAuth, ) -from provisioningserver.auth import ( - get_recorded_api_credentials, - get_recorded_nodegroup_uuid, - ) -from provisioningserver.cluster_config import get_maas_url +from provisioningserver.logger import get_maas_logger -logger = getLogger(__name__) +maaslog = get_maas_logger("dhcp.detect") def make_transaction_ID(): @@ -223,11 +220,11 @@ try: response = client_func(*args, **kwargs) except (HTTPError, URLError) as e: - logger.error("Failed to contact region controller:\n%s", e) + maaslog.warning("Failed to contact region controller:\n%s", e) return None code = response.getcode() if code != httplib.OK: - logger.error( + maaslog.error( "Failed talking to region controller, it returned:\n%s\n%s", code, response.read()) return None @@ -238,7 +235,7 @@ else: return None except ValueError as e: - logger.error( + maaslog.error( "Failed to decode response from region controller:\n%s", e) return None return data @@ -287,13 +284,13 @@ # which we need to ignore; it means the interface has no IP # and there's no need to scan this interface as it's not in # use. - logger.info( + maaslog.debug( "Ignoring DHCP scan for %s, it has no IP address", interface) elif e.errno == errno.ENODEV: # Errno ENODEV is "no such device". This seems an odd situation # since we're scanning detected devices, so this is probably # a bug. 
- logger.error( + maaslog.error( "Ignoring DHCP scan for %s, it no longer exists. Check " "your cluster interfaces configuration.", interface) else: @@ -321,7 +318,7 @@ client.post, api_path, 'report_foreign_dhcp', foreign_dhcp_ip=server) -def periodic_probe_task(): +def periodic_probe_task(api_knowledge): """Probe for DHCP servers and set NodeGroupInterface.foriegn_dhcp. This should be run periodically so that the database has an up-to-date @@ -330,28 +327,14 @@ NOTE: This uses blocking I/O with sequential polling of interfaces, and hence doesn't scale well. It's a future improvement to make to throw it in parallel threads or async I/O. - """ - # Items that the server must have sent us before we can do this. - knowledge = { - 'maas_url': get_maas_url(), - 'api_credentials': get_recorded_api_credentials(), - 'nodegroup_uuid': get_recorded_nodegroup_uuid(), - } - - if None in knowledge.values(): - # The MAAS server hasn't sent us enough information for us to do - # this yet. Leave it for another time. - logger.info( - "Not probing for rogue DHCP servers; not all required knowledge " - "received from server yet. " - "Missing: %s" % ', '.join(sorted( - name for name, value in knowledge.items() if value is None))) - return + :param api_knowledge: A dict of the information needed to be able to + make requests to the region's REST API. + """ # Determine all the active interfaces on this cluster (nodegroup). - interfaces = determine_cluster_interfaces(knowledge) + interfaces = determine_cluster_interfaces(api_knowledge) if interfaces is None: - logger.info("No interfaces on cluster, not probing DHCP.") + maaslog.info("No interfaces on cluster, not probing DHCP.") return # Iterate over interfaces and probe each one. 
@@ -359,7 +342,7 @@ try: servers = probe_interface(interface, ip) except socket.error: - logger.exception( + maaslog.error( "Failed to probe sockets; did you configure authbind as per " "HACKING.txt?") return @@ -368,6 +351,7 @@ # Only send one, if it gets cleared out then the # next detection pass will send a different one, if it # still exists. - update_region_controller(knowledge, interface, servers.pop()) + update_region_controller( + api_knowledge, interface, servers.pop()) else: - update_region_controller(knowledge, interface, None) + update_region_controller(api_knowledge, interface, None) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dhcp/leases_parser.py maas-1.7.6+bzr3376/src/provisioningserver/dhcp/leases_parser.py --- maas-1.5.4+bzr2294/src/provisioningserver/dhcp/leases_parser.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dhcp/leases_parser.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Parser for ISC dhcpd leases file. 
@@ -37,7 +37,7 @@ ) -ip = Regex("[0-9]{1,3}(\.[0-9]{1,3}){3}") +ip = Regex("[:0-9a-fA-F][:.0-9a-fA-F]{2,38}") mac = Regex("[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}") hardware_type = Regex('[A-Za-z0-9_-]+') args = Regex('[^"{;]+') | QuotedString('"') @@ -77,6 +77,7 @@ 'ddns-rev-name', 'ddns-text', 'fixed-address', + 'fixed-address6', 'next', 'option', 'reserved', diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dhcp/leases.py maas-1.7.6+bzr3376/src/provisioningserver/dhcp/leases.py --- maas-1.5.4+bzr2294/src/provisioningserver/dhcp/leases.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dhcp/leases.py 2015-07-10 01:27:14.000000000 +0000 @@ -29,48 +29,42 @@ __metaclass__ = type __all__ = [ - 'upload_leases', - 'update_leases', + 'check_lease_changes', + 'record_lease_state', ] +from collections import defaultdict import errno -import json -from logging import getLogger from os import ( fstat, stat, ) -from apiclient.maas_client import ( - MAASClient, - MAASDispatcher, - MAASOAuth, - ) -from celery.app import app_or_default -from provisioningserver import cache -from provisioningserver.auth import ( - get_recorded_api_credentials, - get_recorded_nodegroup_uuid, - ) -from provisioningserver.cluster_config import get_maas_url from provisioningserver.dhcp.leases_parser_fast import parse_leases +from provisioningserver.logger import get_maas_logger +from provisioningserver.utils.shell import objectfork -logger = getLogger(__name__) +maaslog = get_maas_logger("dhcp.leases") +# Cache for leases, and lease times. +cache = defaultdict() # Cache key for the modification time on last-processed leases file. LEASES_TIME_CACHE_KEY = 'leases_time' - # Cache key for the leases as last parsed. 
LEASES_CACHE_KEY = 'recorded_leases' def get_leases_file(): - """Get the location of the DHCP leases file from the config.""" - return app_or_default().conf.DHCP_LEASES_FILE + """Return the location of the DHCP leases file.""" + # This used to be configuration-based so that the development env could + # have a different location. However, nobody seems to be provisioning from + # a dev environment so it's hard-coded until that need arises, as + # converting to the pserv config would be wasted work right now. + return "/var/lib/maas/dhcp/dhcpd.leases" def get_leases_timestamp(): @@ -115,12 +109,20 @@ # These variables are shared between worker threads/processes. # A bit of inconsistency due to concurrent updates is not a problem, # but read them both at once here to reduce the scope for trouble. - previous_leases = cache.cache.get(LEASES_CACHE_KEY) - previous_leases_time = cache.cache.get(LEASES_TIME_CACHE_KEY) + previous_leases = cache.get(LEASES_CACHE_KEY) + previous_leases_time = cache.get(LEASES_TIME_CACHE_KEY) if get_leases_timestamp() == previous_leases_time: return None - parse_result = parse_leases_file() + + with objectfork() as (pid, recv, send): + if pid == 0: + # Child, where we'll do the parsing. + send(parse_leases_file()) + else: + # Parent, where we'll receive the results. + parse_result = recv() + if parse_result is not None: timestamp, leases = parse_result if leases == previous_leases: @@ -139,73 +141,5 @@ :param leases: A dict mapping each leased IP address to the MAC address that it has been assigned to. """ - cache.cache.set(LEASES_TIME_CACHE_KEY, last_change) - cache.cache.set(LEASES_CACHE_KEY, leases) - - -def list_missing_items(knowledge): - """Report items from dict `knowledge` that are still `None`.""" - return sorted(name for name, value in knowledge.items() if value is None) - - -def send_leases(leases): - """Send lease updates to the server API.""" - # Items that the server must have sent us before we can do this. 
- knowledge = { - 'maas_url': get_maas_url(), - 'api_credentials': get_recorded_api_credentials(), - 'nodegroup_uuid': get_recorded_nodegroup_uuid(), - } - if None in knowledge.values(): - # The MAAS server hasn't sent us enough information for us to do - # this yet. Leave it for another time. - logger.info( - "Not sending DHCP leases to server: not all required knowledge " - "received from server yet. " - "Missing: %s" - % ', '.join(list_missing_items(knowledge))) - return - - api_path = 'api/1.0/nodegroups/%s/' % knowledge['nodegroup_uuid'] - oauth = MAASOAuth(*knowledge['api_credentials']) - MAASClient(oauth, MAASDispatcher(), knowledge['maas_url']).post( - api_path, 'update_leases', leases=json.dumps(leases)) - - -def process_leases(timestamp, leases): - """Send new leases to the MAAS server.""" - record_lease_state(timestamp, leases) - send_leases(leases) - - -def upload_leases(): - """Unconditionally send the current DHCP leases to the server. - - Run this periodically just so no changes slip through the cracks. - Examples of such cracks would be: subtle races, failure to upload, - server restarts, or zone-file update commands getting lost on their - way to the DNS server. - """ - parse_result = parse_leases_file() - if parse_result: - timestamp, leases = parse_result - process_leases(timestamp, leases) - else: - logger.info( - "The DHCP leases file does not exist. This is only a problem if " - "this cluster controller is managing its DHCP server. If that's " - "the case then you need to install the 'maas-dhcp' package on " - "this cluster controller.") - - -def update_leases(): - """Check for DHCP lease updates, and send them to the server if needed. - - Run this whenever a lease has been added, removed, or changed. It - will be very cheap to run if the leases file has not been touched, - and it won't upload unless there have been relevant changes. 
- """ - updated_lease_info = check_lease_changes() - if updated_lease_info is not None: - timestamp, leases = updated_lease_info - process_leases(timestamp, leases) + cache[LEASES_TIME_CACHE_KEY] = last_change + cache[LEASES_CACHE_KEY] = leases diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dhcp/omshell.py maas-1.7.6+bzr3376/src/provisioningserver/dhcp/omshell.py --- maas-1.5.4+bzr2294/src/provisioningserver/dhcp/omshell.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dhcp/omshell.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,233 @@ +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Python wrapper around the `omshell` utility which amends objects +inside the DHCP server. +""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "generate_omapi_key", + "Omshell", + ] + +import os +import re +from subprocess import ( + PIPE, + Popen, + ) +from textwrap import dedent + +from provisioningserver.utils import parse_key_value_file +from provisioningserver.utils.fs import tempdir +from provisioningserver.utils.shell import ( + call_and_check, + ExternalProcessError, + ) + + +bad_key_pattern = re.compile("[+/]no|no[+/]", flags=re.IGNORECASE) + + +def call_dnssec_keygen(tmpdir): + path = os.environ.get("PATH", "").split(os.pathsep) + path.append("/usr/sbin") + env = dict(os.environ, PATH=os.pathsep.join(path)) + return call_and_check( + ['dnssec-keygen', '-r', '/dev/urandom', '-a', 'HMAC-MD5', + '-b', '512', '-n', 'HOST', '-K', tmpdir, '-q', 'omapi_key'], + env=env) + + +def run_repeated_keygen(tmpdir): + # omshell has a bug where if the chars '/' or '+' appear either + # side of the word 'no' (in any case), it throws an error like + # "partial base64 value left over". 
We check for that here and + # repeatedly generate a new key until a good one is generated. + + key = None + while key is None: + key_id = call_dnssec_keygen(tmpdir) + + # Locate the file that was written and strip out the Key: field in + # it. + if not key_id: + raise AssertionError("dnssec-keygen didn't generate anything") + key_id = key_id.strip() # Remove trailing newline. + key_file_name = os.path.join(tmpdir, key_id + '.private') + parsing_error = False + try: + config = parse_key_value_file(key_file_name) + except ValueError: + parsing_error = True + if parsing_error or 'Key' not in config: + raise AssertionError( + "Key field not found in output from dnssec-keygen") + + key = config['Key'] + if bad_key_pattern.search(key) is not None: + # Force a retry. + os.remove(key_file_name) # Stop dnssec_keygen complaints. + key = None + + return key + + +def generate_omapi_key(): + """Generate a HMAC-MD5 key by calling out to the dnssec-keygen tool. + + :return: The shared key suitable for OMAPI access. + :type: string + """ + # dnssec-keygen writes out files to a specified directory, so we + # need to make a temp directory for that. + # This relies on the temporary directory being accessible only to its + # owner. + temp_prefix = "%s." % os.path.basename(__file__) + with tempdir(prefix=temp_prefix) as tmpdir: + key = run_repeated_keygen(tmpdir) + return key + + +class Omshell: + """Wrap up the omshell utility in Python. + + 'omshell' is an external executable that communicates with a DHCP daemon + and manipulates its objects. This class wraps up the commands necessary + to add and remove host maps (MAC to IP). 
+ + :param server_address: The address for the DHCP server (ip or hostname) + :param shared_key: An HMAC-MD5 key generated by dnssec-keygen like: + $ dnssec-keygen -r /dev/urandom -a HMAC-MD5 -b 512 -n HOST omapi_key + $ cat Komapi_key.+*.private |grep ^Key|cut -d ' ' -f2- + It must match the key set in the DHCP server's config which looks + like this: + + omapi-port 7911; + key omapi_key { + algorithm HMAC-MD5; + secret "XXXXXXXXX"; #<-The output from the generated key above. + }; + omapi-key omapi_key; + """ + + def __init__(self, server_address, shared_key): + self.server_address = server_address + self.shared_key = shared_key + self.command = ["omshell"] + + def _run(self, stdin): + proc = Popen(self.command, stdin=PIPE, stdout=PIPE) + stdout, stderr = proc.communicate(stdin) + if proc.poll() != 0: + raise ExternalProcessError(proc.returncode, self.command, stdout) + return proc.returncode, stdout + + def create(self, ip_address, mac_address): + # The "name" is not a host name; it's an identifier used within + # the DHCP server. We just happen to use the IP address. + stdin = dedent("""\ + server {self.server_address} + key omapi_key {self.shared_key} + connect + new host + set ip-address = {ip_address} + set hardware-address = {mac_address} + set hardware-type = 1 + set name = "{ip_address}" + create + """) + stdin = stdin.format( + self=self, ip_address=ip_address, mac_address=mac_address) + + returncode, output = self._run(stdin) + # If the call to omshell doesn't result in output containing the + # magic string 'hardware-type' then we can be reasonably sure + # that the 'create' command failed. Unfortunately there's no + # other output like "successful" to check so this is the best we + # can do. + if "hardware-type" in output: + # Success. + pass + elif "can't open object: I/O error" in output: + # Host map already existed. Treat as success. 
+ pass + else: + raise ExternalProcessError(returncode, self.command, output) + + def remove(self, ip_address): + # The "name" is not a host name; it's an identifier used within + # the DHCP server. We just happen to use the IP address. + stdin = dedent("""\ + server {self.server_address} + key omapi_key {self.shared_key} + connect + new host + set name = "{ip_address}" + open + remove + """) + stdin = stdin.format( + self=self, ip_address=ip_address) + + returncode, output = self._run(stdin) + + # If the omshell worked, the last line should reference a null + # object. We need to strip blanks, newlines and '>' characters + # for this to work. + lines = output.strip('\n >').splitlines() + try: + last_line = lines[-1] + except IndexError: + last_line = "" + if "obj: + can't open object: I/O error + obj: host + ip-address = %(ip)s + hardware-address = %(mac)s + name = "%(hostname)s" + """) % params + shell._run = Mock(return_value=(0, error_output)) + shell.create(params['ip'], params['mac']) + # The test is that we get here without error. + pass + + def test_remove_calls_omshell_correctly(self): + server_address = factory.make_string() + shared_key = factory.make_string() + ip_address = factory.make_ipv4_address() + shell = Omshell(server_address, shared_key) + + # Instead of calling a real omshell, we'll just record the + # parameters passed to Popen. + recorder = FakeMethod(result=(0, "thing1\nthing2\nobj: ")) + shell._run = recorder + + shell.remove(ip_address) + + expected_script = dedent("""\ + server {server} + key omapi_key {key} + connect + new host + set name = "{ip}" + open + remove + """) + expected_script = expected_script.format( + server=server_address, key=shared_key, ip=ip_address) + + # Check that the 'stdin' arg contains the correct set of + # commands. 
+ self.assertEqual([(expected_script,)], recorder.extract_args()) + + def test_remove_raises_when_omshell_fails(self): + # If the call to omshell doesn't result in output ending in the + # text 'obj: ' we can be fairly sure this operation + # failed. + server_address = factory.make_string() + shared_key = factory.make_string() + ip_address = factory.make_ipv4_address() + shell = Omshell(server_address, shared_key) + + # Fake a call that results in a failure with random output. + random_output = factory.make_string() + recorder = FakeMethod(result=(0, random_output)) + shell._run = recorder + + exc = self.assertRaises( + subprocess.CalledProcessError, shell.remove, ip_address) + self.assertEqual(random_output, exc.output) + + def test_remove_works_when_extraneous_blank_last_lines(self): + # Sometimes omshell puts blank lines after the 'obj: ' so + # we need to test that the code still works if that's the case. + server_address = factory.make_string() + shared_key = factory.make_string() + ip_address = factory.make_ipv4_address() + shell = Omshell(server_address, shared_key) + + # Fake a call that results in a something with our special output. + output = "\n> obj: \n\n" + self.patch(shell, '_run').return_value = (0, output) + self.assertIsNone(shell.remove(ip_address)) + + def test_remove_works_when_extraneous_gt_char_present(self): + # Sometimes omshell puts a leading '>' character in responses. + # We need to test that the code still works if that's the case. + server_address = factory.make_string() + shared_key = factory.make_string() + ip_address = factory.make_ipv4_address() + shell = Omshell(server_address, shared_key) + + # Fake a call that results in a something with our special output. 
+ output = "\n>obj: \n>\n" + self.patch(shell, '_run').return_value = (0, output) + self.assertIsNone(shell.remove(ip_address)) + + def test_remove_works_when_object_already_removed(self): + server_address = factory.make_string() + shared_key = factory.make_string() + ip_address = factory.make_ipv4_address() + shell = Omshell(server_address, shared_key) + + output = "obj: \nobj: host\ncan't open object: not found\n" + self.patch(shell, '_run').return_value = (0, output) + self.assertIsNone(shell.remove(ip_address)) + + +class Test_Omshell_nullify_lease(MAASTestCase): + """Tests for Omshell.nullify_lease""" + + def test__calls_omshell_correctly(self): + server_address = factory.make_string() + shared_key = factory.make_string() + ip_address = factory.make_ipv4_address() + shell = Omshell(server_address, shared_key) + + # Instead of calling a real omshell, we'll just record the + # parameters passed to Popen. + run = self.patch(shell, '_run') + run.return_value = (0, '\nends = 00:00:00:00') + expected_script = dedent("""\ + server {server} + key omapi_key {key} + connect + new lease + set ip-address = {ip} + open + set ends = 00:00:00:00 + update + """) + expected_script = expected_script.format( + server=server_address, key=shared_key, ip=ip_address) + shell.nullify_lease(ip_address) + self.assertThat(run, MockCalledOnceWith(expected_script)) + + def test__considers_nonexistent_lease_a_success(self): + server_address = factory.make_string() + shared_key = factory.make_string() + ip_address = factory.make_ipv4_address() + shell = Omshell(server_address, shared_key) + + output = ( + "obj: \nobj: lease\nobj: lease\n" + "can't open object: not found\nobj: lease\n") + self.patch(shell, '_run').return_value = (0, output) + shell.nullify_lease(ip_address) # No exception. 
+ self.assertThat(shell._run, MockCalledOnceWith(ANY)) + + def test__catches_invalid_error(self): + server_address = factory.make_string() + shared_key = factory.make_string() + ip_address = factory.make_ipv4_address() + shell = Omshell(server_address, shared_key) + + output = "obj: \nobj: lease\ninvalid value." + self.patch(shell, '_run').return_value = (0, output) + self.assertRaises( + ExternalProcessError, shell.nullify_lease, ip_address) + + def test__catches_failed_update(self): + server_address = factory.make_string() + shared_key = factory.make_string() + ip_address = factory.make_ipv4_address() + shell = Omshell(server_address, shared_key) + + # make "ends" different to what we asked, so the post-run check + # should fail. + output = dedent("""\ + obj: + obj: lease + obj: lease + ip-address = 0a:00:00:72 + state = 00:00:00:01 + subnet = 00:00:00:03 + pool = 00:00:00:04 + hardware-address = 00:16:3e:06:45:5e + hardware-type = 00:00:00:01 + ends = 00:00:00:FF + starts = "T@v'" + tstp = 54:41:1e:e7 + tsfp = 00:00:00:00 + atsfp = 00:00:00:00 + cltt = "T@v'" + flags = 00 + """) + self.patch(shell, '_run').return_value = (0, output) + self.assertRaises( + ExternalProcessError, shell.nullify_lease, ip_address) + + +class Test_generate_omapi_key(MAASTestCase): + """Tests for omshell.generate_omapi_key""" + + def test_generate_omapi_key_returns_a_key(self): + key = generate_omapi_key() + # Could test for != None here, but the keys end in == for a 512 + # bit length key, so that's a better check that the script was + # actually run and produced output. + self.assertThat(key, EndsWith("==")) + + def test_generate_omapi_key_leaves_no_temp_files(self): + tmpdir = self.useFixture(TempDirectory()).path + # Make mkdtemp() in omshell nest all directories within tmpdir. 
+ self.patch(tempfile, 'tempdir', tmpdir) + generate_omapi_key() + self.assertEqual([], os.listdir(tmpdir)) + + def test_generate_omapi_key_raises_assertionerror_on_no_output(self): + self.patch(omshell, 'call_dnssec_keygen', FakeMethod()) + self.assertRaises(AssertionError, generate_omapi_key) + + def test_generate_omapi_key_raises_assertionerror_on_bad_output(self): + def returns_junk(tmpdir): + key_name = factory.make_string() + factory.make_file(tmpdir, "%s.private" % key_name) + return key_name + + self.patch(omshell, 'call_dnssec_keygen', returns_junk) + self.assertRaises(AssertionError, generate_omapi_key) + + def test_run_repeated_keygen(self): + bad_patterns = { + "+no", "/no", "no+", "no/", + "+NO", "/NO", "NO+", "NO/", + } + bad_patterns_templates = { + "foo%sbar", "one\ntwo\n%s\nthree\n", "%s", + } + # Test that a known bad key is ignored and we generate a new one + # to replace it. + bad_keys = { + # This key is known to fail with omshell. + "YXY5pr+No/8NZeodSd27wWbI8N6kIjMF/nrnFIlPwVLuByJKkQcBRtfDrD" + "LLG2U9/ND7/bIlJxEGTUnyipffHQ==", + } + # Fabricate a range of keys containing the known-bad pattern. + bad_keys.update( + template % pattern for template, pattern in product( + bad_patterns_templates, bad_patterns)) + # An iterator that we can exhaust without mutating bad_keys. + iter_bad_keys = iter(bad_keys) + # Reference to the original parse_key_value_file, before we patch. + parse_key_value_file = omshell.parse_key_value_file + + # Patch parse_key_value_file to return each of the known-bad keys + # we've created, followed by reverting to its usual behaviour. + def side_effect(*args, **kwargs): + try: + return {'Key': next(iter_bad_keys)} + except StopIteration: + return parse_key_value_file(*args, **kwargs) + + mock = self.patch(omshell, 'parse_key_value_file') + mock.side_effect = side_effect + + # generate_omapi_key() does not return a key known to be bad. 
+ self.assertNotIn(generate_omapi_key(), bad_keys) + + +class TestCallDnsSecKeygen(MAASTestCase): + """Tests for omshell.call_dnssec_keygen.""" + + def test_runs_external_script(self): + call_and_check = self.patch(omshell, 'call_and_check') + target_dir = self.make_dir() + path = os.environ.get("PATH", "").split(os.pathsep) + path.append("/usr/sbin") + call_dnssec_keygen(target_dir) + call_and_check.assert_called_once_with( + ['dnssec-keygen', '-r', '/dev/urandom', '-a', 'HMAC-MD5', + '-b', '512', '-n', 'HOST', '-K', target_dir, '-q', 'omapi_key'], + env=ANY) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dhcp/tests/test_writer.py maas-1.7.6+bzr3376/src/provisioningserver/dhcp/tests/test_writer.py --- maas-1.5.4+bzr2294/src/provisioningserver/dhcp/tests/test_writer.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dhcp/tests/test_writer.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `provisioningserver.dhcp.writer`.""" @@ -24,8 +24,12 @@ import sys from maastesting import root +from maastesting.factory import factory from maastesting.testcase import MAASTestCase +from mock import Mock from provisioningserver.dhcp import writer +from provisioningserver.dhcp.testing.config import make_subnet_config +from provisioningserver.utils.fs import read_text_file from testtools.matchers import ( ContainsAll, MatchesStructure, @@ -35,37 +39,78 @@ class TestScript(MAASTestCase): """Test the DHCP configuration writer.""" - test_args = ( - '--subnet', 'subnet', - '--interface', 'eth0', - '--subnet-mask', 'subnet-mask', - '--broadcast-ip', 'broadcast-ip', - '--dns-servers', 'dns-servers', - '--ntp-server', 'ntp-server', - '--domain-name', 'domain-name', - '--router-ip', 'router-ip', - '--ip-range-low', 'ip-range-low', - '--ip-range-high', 'ip-range-high', - '--omapi-key', 'omapi-key', - ) + def make_args(self, network=None): + """Create a fake parameter for `run`, based on `network`.""" + settings = make_subnet_config(network) + args = Mock() + args.outfile = None + args.omapi_key = factory.make_name('key') + args.subnet = settings['subnet'] + args.interface = settings['interface'] + args.subnet_mask = settings['subnet_mask'] + args.broadcast_ip = settings['broadcast_ip'] + args.dns_servers = settings['dns_servers'] + args.ntp_server = settings['ntp_server'] + args.domain_name = settings['domain_name'] + args.router_ip = settings['router_ip'] + args.ip_range_low = settings['ip_range_low'] + args.ip_range_high = settings['ip_range_high'] + return args def test_script_executable(self): - script = ["%s/bin/maas-provision" % root, "generate-dhcp-config"] - script.extend(self.test_args) + args = self.make_args() + script = [ + "%s/bin/maas-provision" % root, + "generate-dhcp-config", + '--subnet', args.subnet, + '--interface', args.interface, + '--subnet-mask', args.subnet_mask, + '--broadcast-ip', args.broadcast_ip, + '--dns-servers', 
args.dns_servers, + '--ntp-server', args.ntp_server, + '--domain-name', args.domain_name, + '--router-ip', args.router_ip, + '--ip-range-low', args.ip_range_low, + '--ip-range-high', args.ip_range_high, + '--omapi-key', args.omapi_key, + ] + cmd = Popen( script, stdout=PIPE, env=dict(PYTHONPATH=":".join(sys.path))) output, err = cmd.communicate() + self.assertEqual(0, cmd.returncode, err) - contains_all_params = ContainsAll( - ['subnet', 'subnet-mask', 'broadcast-ip', - 'omapi-key', 'dns-servers', 'ntp-server', 'domain-name', - 'router-ip', 'ip-range-low', 'ip-range-high']) - self.assertThat(output, contains_all_params) + + self.assertThat(output, ContainsAll([ + args.subnet, + args.subnet_mask, + args.broadcast_ip, + args.omapi_key, + args.dns_servers, + args.ntp_server, + args.domain_name, + args.router_ip, + args.ip_range_low, + args.ip_range_high, + ])) def test_arg_setup(self): + test_args = ( + '--subnet', 'subnet', + '--interface', 'eth0', + '--subnet-mask', 'subnet-mask', + '--broadcast-ip', 'broadcast-ip', + '--dns-servers', 'dns-servers', + '--ntp-server', 'ntp-server', + '--domain-name', 'domain-name', + '--router-ip', 'router-ip', + '--ip-range-low', 'ip-range-low', + '--ip-range-high', 'ip-range-high', + '--omapi-key', 'omapi-key', + ) parser = ArgumentParser() writer.add_arguments(parser) - args = parser.parse_args(self.test_args) + args = parser.parse_args(test_args) self.assertThat( args, MatchesStructure.byEquality( subnet='subnet', @@ -82,46 +127,44 @@ def test_run(self): self.patch(sys, "stdout", BytesIO()) - parser = ArgumentParser() - writer.add_arguments(parser) - args = parser.parse_args(self.test_args) + args = self.make_args(factory.make_ipv4_network()) + writer.run(args) + output = sys.stdout.getvalue() contains_all_params = ContainsAll([ - 'subnet', - 'interface', - 'subnet-mask', - 'broadcast-ip', - 'omapi-key', - 'dns-servers', - 'ntp-server', - 'domain-name', - 'router-ip', - 'ip-range-low', - 'ip-range-high', + args.subnet, + 
args.interface, + args.subnet_mask, + args.broadcast_ip, + args.omapi_key, + args.dns_servers, + args.ntp_server, + args.domain_name, + args.router_ip, + args.ip_range_low, + args.ip_range_high, ]) self.assertThat(output, contains_all_params) def test_run_save_to_file(self): - parser = ArgumentParser() - writer.add_arguments(parser) - outfile = os.path.join(self.make_dir(), "outfile.txt") - args = parser.parse_args( - self.test_args + ("--outfile", outfile)) + args = self.make_args() + args.outfile = os.path.join(self.make_dir(), "outfile.txt") + writer.run(args) - with open(outfile, "rb") as stream: - output = stream.read() - contains_all_params = ContainsAll([ - 'subnet', - 'interface', - 'subnet-mask', - 'broadcast-ip', - 'omapi-key', - 'dns-servers', - 'ntp-server', - 'domain-name', - 'router-ip', - 'ip-range-low', - 'ip-range-high', - ]) - self.assertThat(output, contains_all_params) + + self.assertThat( + read_text_file(args.outfile), + ContainsAll([ + args.subnet, + args.interface, + args.subnet_mask, + args.broadcast_ip, + args.omapi_key, + args.dns_servers, + args.ntp_server, + args.domain_name, + args.router_ip, + args.ip_range_low, + args.ip_range_high, + ])) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dhcp/writer.py maas-1.7.6+bzr3376/src/provisioningserver/dhcp/writer.py --- maas-1.5.4+bzr2294/src/provisioningserver/dhcp/writer.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dhcp/writer.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Generate a DHCP server configuration.""" @@ -74,14 +74,15 @@ def run(args): """Generate a DHCP server configuration, and write it to stdout.""" - params = vars(args) + params = vars(args).copy() omapi_key = params.pop('omapi_key') outfile = params.pop('outfile') kwargs = { 'dhcp_subnets': [params], 'omapi_key': omapi_key, } - output = config.get_config(**kwargs).encode("ascii") + template = 'dhcpd.conf.template' + output = config.get_config(template, **kwargs).encode("ascii") if outfile is None: sys.stdout.write(output) else: diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/diskless.py maas-1.7.6+bzr3376/src/provisioningserver/diskless.py --- maas-1.5.4+bzr2294/src/provisioningserver/diskless.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/diskless.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,242 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Generate diskless image for system to boot.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'create_diskless_disk', + 'delete_diskless_disk', + ] + +import os +from textwrap import dedent + +from provisioningserver import config +from provisioningserver.drivers.diskless import DisklessDriverRegistry +from provisioningserver.drivers.osystem import ( + BOOT_IMAGE_PURPOSE, + OperatingSystemRegistry, + ) +from provisioningserver.logger import get_maas_logger +from provisioningserver.utils.fs import ( + atomic_symlink, + atomic_write, + ) +from provisioningserver.utils.shell import call_and_check + + +maaslog = get_maas_logger("diskless") + + +class DisklessError(Exception): + """Error raised when issue occurs during a diskless task.""" + + +def get_diskless_store(): + """Return path to the diskless store. + + This is the location that all diskless links exist. 
It holds all of the + currently in use disk for diskless booting. + """ + return os.path.join( + config.BOOT_RESOURCES_STORAGE, 'diskless', 'store') + + +def compose_diskless_link_path(system_id): + """Return path to the symbolic link for the given system_id. + + This is the link that will be written into the diskless store. It is used + to reference what disks are currently being used for diskless booting. + """ + return os.path.join(get_diskless_store(), system_id) + + +def create_diskless_link(system_id, storage_path): + """Create symbolic link in the diskless store to the actual path + of the backing store. + + Each diskless driver returns an absolute path to were the data can be + accessed on the system. A symbolic link is made in the diskless store to + reference this location, so it can be retrieved later by system_id. + """ + link_path = compose_diskless_link_path(system_id) + if os.path.lexists(link_path): + raise DisklessError( + "Backend storage link already exists for: %s" % system_id) + atomic_symlink(storage_path, link_path) + + +def delete_diskless_link(system_id): + """Delete symbolic link in the diskless store.""" + link_path = compose_diskless_link_path(system_id) + if os.path.lexists(link_path): + os.unlink(link_path) + + +def read_diskless_link(system_id): + """Return actual path to the backing store, from the link + in the diskless store.""" + link_path = compose_diskless_link_path(system_id) + if not os.path.lexists(link_path): + return None + return os.readlink(link_path) + + +def get_diskless_target(system_id): + """Get the iscsi target name for the node.""" + prefix = 'iqn.2004-05.com.ubuntu:maas' + return '%s:root-diskless-%s' % (prefix, system_id) + + +def get_diskless_tgt_path(): + """Return path to maas-diskless.tgt.""" + return os.path.join( + config.BOOT_RESOURCES_STORAGE, 'diskless', 'maas-diskless.tgt') + + +def tgt_entry(system_id, image): + """Generate tgt target used for root disk + + Tgt target used by the node as its root disk. 
This function creates target + description in a format used by tgt-admin. It uses system_id to generate + target name and image as a path to image file which should be available. + + :param system_id: Node system_id + :param image: Path to the image which should be shared via tgt/iscsi + :return Tgt entry which can be written to tgt-admin configuration file + """ + target = get_diskless_target(system_id) + entry = dedent("""\ + + readonly 0 + backing-store "{image}" + driver iscsi + + """).format(target=target, image=image) + return entry + + +def compose_diskless_tgt_config(): + """Produce the contents of a diskless tgt conf file. + + :return: Contents for a `targets.conf` file. + :rtype: bytes + """ + tgt_entries = [] + for system_id in os.listdir(get_diskless_store()): + image_path = compose_diskless_link_path(system_id) + tgt_entries.append(tgt_entry(system_id, image_path)) + return ''.join(tgt_entries).encode('utf-8') + + +def reload_diskless_tgt(): + """Reload the diskless tgt config.""" + call_and_check([ + 'sudo', + '/usr/sbin/tgt-admin', + '--conf', get_diskless_tgt_path(), + '--update', 'ALL', + ]) + + +def update_diskless_tgt(): + """Re-writes the "maas-diskless.tgt" to include all targets that have + symlinks in the diskless store. Reloads the tgt config.""" + tgt_path = get_diskless_tgt_path() + tgt_config = compose_diskless_tgt_config() + atomic_write(tgt_config, tgt_path, mode=0644) + reload_diskless_tgt() + + +def get_diskless_driver(driver): + """Return the diskless driver object. + + :raise DisklessError: if driver does not exist. + """ + driver_obj = DisklessDriverRegistry.get_item(driver) + if driver_obj is None: + raise DisklessError( + "Cluster doesn't support diskless driver: %s" % driver) + return driver_obj + + +def compose_source_path(osystem_name, arch, subarch, release, label): + """Return path to the source file for the diskless boot image. + + Each diskless driver will use this source to initialize the disk. 
+ """ + osystem = OperatingSystemRegistry.get_item(osystem_name) + if osystem is None: + raise DisklessError( + "OS doesn't exist in operating system registry: %s" % osystem_name) + purposes = osystem.get_boot_image_purposes(arch, subarch, release, label) + if BOOT_IMAGE_PURPOSE.DISKLESS not in purposes: + raise DisklessError( + "OS doesn't support diskless booting: %s" % osystem_name) + root_path, _ = osystem.get_xinstall_parameters() + return os.path.join( + config.BOOT_RESOURCES_STORAGE, 'current', + osystem_name, arch, subarch, release, label, root_path) + + +def create_diskless_disk(driver, driver_options, system_id, + osystem, arch, subarch, release, label): + """Creates a disk using the `driver` for the `system_id`. This disk will + be used for booting diskless.""" + source_path = compose_source_path(osystem, arch, subarch, release, label) + if not os.path.exists(source_path): + raise DisklessError("Boot resources doesn't exist: %s" % source_path) + link_path = compose_diskless_link_path(system_id) + if os.path.lexists(link_path): + raise DisklessError("Disk already exists for node: %s" % system_id) + + # Create the disk with the driver, and place the link in diskless source. + maaslog.info( + "Creating disk for node %s using driver: %s", system_id, driver) + driver_obj = get_diskless_driver(driver) + disk_path = driver_obj.create_disk( + system_id, source_path, **driver_options) + if disk_path is None or not os.path.exists(disk_path): + raise DisklessError( + "Driver failed to create disk for node: %s" % system_id) + create_diskless_link(system_id, disk_path) + + # Re-write the tgt config, to include the new disk for the node. 
+ maaslog.info("Updating iSCSI targets.") + update_diskless_tgt() + + +def delete_diskless_disk(driver, driver_options, system_id): + """Deletes the disk that was used by the node for diskless booting.""" + link_path = compose_diskless_link_path(system_id) + if not os.path.lexists(link_path): + maaslog.warn("Disk already deleted for node: %s", system_id) + return + + maaslog.info( + "Destroying disk for node %s using driver: %s", system_id, driver) + disk_path = read_diskless_link(system_id) + if disk_path is None: + raise DisklessError( + "Failed to read diskless link for node: %s" % system_id) + if os.path.exists(disk_path): + driver_obj = get_diskless_driver(driver) + driver_obj.delete_disk(system_id, disk_path, **driver_options) + else: + maaslog.warn(( + "Assuming disk has already been removed " + "for node %s by the driver: %s"), system_id, driver) + delete_diskless_link(system_id) + + # Re-write the tgt config, to include only the remaining disks. + maaslog.info("Updating iSCSI targets.") + update_diskless_tgt() diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dns/config.py maas-1.7.6+bzr3376/src/provisioningserver/dns/config.py --- maas-1.5.4+bzr2294/src/provisioningserver/dns/config.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dns/config.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,45 +14,69 @@ __metaclass__ = type __all__ = [ 'DNSConfig', - 'DNSForwardZoneConfig', - 'DNSReverseZoneConfig', 'MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME', - 'setup_rndc', + 'set_up_rndc', 'set_up_options_conf', ] -from abc import ABCMeta +from collections import namedtuple from contextlib import contextmanager from datetime import datetime import errno -from itertools import ( - chain, - imap, - islice, - ) +import os import os.path import re +import sys -from celery.app import app_or_default -from provisioningserver.dns.utils import generated_hostname -from provisioningserver.utils import ( - atomic_write, - call_and_check, - 
call_capture_and_check, - incremental_write, - locate_config, - ) +from provisioningserver.utils import locate_config +from provisioningserver.utils.fs import atomic_write +from provisioningserver.utils.isc import read_isc_file +from provisioningserver.utils.shell import call_and_check import tempita +NAMED_CONF_OPTIONS = 'named.conf.options' MAAS_NAMED_CONF_NAME = 'named.conf.maas' MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME = 'named.conf.options.inside.maas' MAAS_NAMED_RNDC_CONF_NAME = 'named.conf.rndc.maas' MAAS_RNDC_CONF_NAME = 'rndc.conf.maas' -conf = app_or_default().conf +def get_dns_config_dir(): + """Location of MAAS' bind configuration files.""" + setting = os.getenv( + "MAAS_DNS_CONFIG_DIR", + locate_config(os.path.pardir, "bind", "maas")) + if isinstance(setting, bytes): + fsenc = sys.getfilesystemencoding() + return setting.decode(fsenc) + else: + return setting + + +def get_bind_config_dir(): + """Location of bind configuration files.""" + setting = os.getenv( + "MAAS_BIND_CONFIG_DIR", + locate_config(os.path.pardir, "bind")) + if isinstance(setting, bytes): + fsenc = sys.getfilesystemencoding() + return setting.decode(fsenc) + else: + return setting + + +def get_dns_rndc_port(): + """RNDC port to be configured by MAAS to communicate with BIND.""" + setting = os.getenv("MAAS_DNS_RNDC_PORT", "954") + return int(setting) + + +def get_dns_default_controls(): + """Include the default RNDC controls (default RNDC key on port 953)?""" + setting = os.getenv("MAAS_DNS_DEFAULT_CONTROLS", "1") + return (setting == "1") class DNSConfigDirectoryMissing(Exception): @@ -63,6 +87,15 @@ """Raised if there's a problem with a DNS config.""" +SRVRecord = namedtuple('SRVRecord', [ + 'service', + 'priority', + 'weight', + 'port', + 'target' + ]) + + # Default 'controls' stanza to be included in the Bind configuration, to # enable "remote" administration (well, only locally) for the init scripts, # so that they can control the DNS daemon over port 953. 
@@ -102,7 +135,7 @@ # Generate the configuration: # - 256 bits is the recommended size for the key nowadays. # - Use urandom to avoid blocking on the random generator. - rndc_content = call_capture_and_check( + rndc_content = call_and_check( ['rndc-confgen', '-b', '256', '-r', '/dev/urandom', '-k', key_name, '-p', unicode(port).encode("ascii")]) named_comment = extract_suggested_named_conf(rndc_content) @@ -126,14 +159,13 @@ return compose_config_path(MAAS_RNDC_CONF_NAME) -def setup_rndc(): +def set_up_rndc(): """Writes out the two files needed to enable MAAS to use rndc commands: - MAAS_RNDC_CONF_NAME and MAAS_NAMED_RNDC_CONF_NAME, both stored in - conf.DNS_CONFIG_DIR. + MAAS_RNDC_CONF_NAME and MAAS_NAMED_RNDC_CONF_NAME. """ rndc_content, named_content = generate_rndc( - port=conf.DNS_RNDC_PORT, - include_default_controls=conf.DNS_DEFAULT_CONTROLS) + port=get_dns_rndc_port(), + include_default_controls=get_dns_default_controls()) target_file = get_rndc_conf_path() with open(target_file, "wb") as f: @@ -149,8 +181,7 @@ rndc_conf = get_rndc_conf_path() rndc_cmd = ['rndc', '-c', rndc_conf] rndc_cmd.extend(arguments) - with open(os.devnull, "ab") as devnull: - call_and_check(rndc_cmd, stdout=devnull) + call_and_check(rndc_cmd) # Location of DNS templates, relative to the configuration directory. @@ -170,12 +201,30 @@ "named.conf.options.inside.maas.template") template = tempita.Template.from_filename(template_path) - # Make sure "upstream_dns" is set at least to None. It's a - # special piece of config that can't be obtained in celery - # task code and we don't want to require that every call site - # has to specify it. If it's not set, the substitution will - # fail with the default template that uses this value. + # Make sure "upstream_dns" is set at least to None. It's a special piece + # of config and we don't want to require that every call site has to + # specify it. If it's not set, the substitution will fail with the default + # template that uses this value. 
kwargs.setdefault("upstream_dns") + + # Parse the options file and make sure MAAS doesn't define any options + # that the user has already customized. + allow_user_override_options = [ + "allow-query", + "allow-recursion", + "allow-query-cache", + ] + + try: + parsed_options = read_isc_file( + compose_bind_config_path(NAMED_CONF_OPTIONS)) + except IOError: + parsed_options = {} + + options = parsed_options.get('options', {}) + for option in allow_user_override_options: + kwargs['upstream_' + option.replace('-', '_')] = option in options + try: rendered = template.substitute(kwargs) except NameError as error: @@ -187,7 +236,12 @@ def compose_config_path(filename): """Return the full path for a DNS config or zone file.""" - return os.path.join(conf.DNS_CONFIG_DIR, filename) + return os.path.join(get_dns_config_dir(), filename) + + +def compose_bind_config_path(filename): + """Return the full path for a DNS config or zone file.""" + return os.path.join(get_bind_config_dir(), filename) def render_dns_template(template_name, *parameters): @@ -249,10 +303,12 @@ :raises DNSConfigDirectoryMissing: if the DNS configuration directory does not exist. """ + trusted_networks = kwargs.pop("trusted_networks", "") context = { 'zones': self.zones, - 'DNS_CONFIG_DIR': conf.DNS_CONFIG_DIR, + 'DNS_CONFIG_DIR': get_dns_config_dir(), 'named_rndc_conf_path': get_named_rndc_conf_path(), + 'trusted_networks': trusted_networks, 'modified': unicode(datetime.today()), } content = render_dns_template(self.template_file_name, kwargs, context) @@ -266,217 +322,3 @@ assert '"' not in target_path, ( "DNS config path contains quote: %s." % target_path) return 'include "%s";\n' % target_path - - -class DNSZoneConfigBase: - """Base class for zone writers.""" - - __metaclass__ = ABCMeta - - template_file_name = 'zone.template' - - def __init__(self, domain, zone_name, serial=None): - """ - :param domain: The domain name of the forward zone. - :param zone_name: Fully-qualified zone name. 
- :param serial: The serial to use in the zone file. This must increment - on each change. - """ - self.domain = domain - self.zone_name = zone_name - self.serial = serial - self.target_path = compose_config_path('zone.%s' % self.zone_name) - - def make_parameters(self): - """Return a dict of the common template parameters.""" - return { - 'domain': self.domain, - 'serial': self.serial, - 'modified': unicode(datetime.today()), - } - - @classmethod - def write_zone_file(cls, output_file, *parameters): - """Write a zone file based on the zone file template. - - There is a subtlety with zone files: their filesystem timestamp must - increase with every rewrite. Some filesystems (ext3?) only seem to - support a resolution of one second, and so this method may set an - unexpected modification time in order to maintain that property. - """ - content = render_dns_template(cls.template_file_name, *parameters) - with report_missing_config_dir(): - incremental_write(content, output_file, mode=0644) - - -class DNSForwardZoneConfig(DNSZoneConfigBase): - """Writes forward zone files. - - A forward zone config contains two kinds of mappings: "A" records map all - possible IP addresses within each of its networks to generated hostnames - based on those addresses. "CNAME" records map configured hostnames to the - matching generated IP hostnames. An additional "A" record maps the domain - to the name server itself. - """ - - def __init__(self, domain, **kwargs): - """See `DNSZoneConfigBase.__init__`. - - :param domain: The domain name of the forward zone. - :param serial: The serial to use in the zone file. This must increment - on each change. - :param networks: The networks that the mapping exists within. - :type networks: Sequence of :class:`netaddr.IPNetwork` - :param dns_ip: The IP address of the DNS server authoritative for this - zone. - :param mapping: A hostname:ip-address mapping for all known hosts in - the zone. 
These are configured hostnames, not the ones generated - based on IP addresses. They will be mapped as CNAME records. - """ - self._networks = kwargs.pop('networks', []) - self._dns_ip = kwargs.pop('dns_ip', None) - self._mapping = kwargs.pop('mapping', {}) - super(DNSForwardZoneConfig, self).__init__( - domain, zone_name=domain, **kwargs) - - @classmethod - def get_cname_mapping(cls, mapping): - """Return a generator mapping hostnames to generated hostnames. - - The mapping only contains hosts for which the two host names differ. - - :param mapping: A dict mapping host names to IP addresses. - :return: A generator of tuples: (host name, generated host name). - """ - # We filter out cases where the two host names are identical: it - # would be wrong to define a CNAME that maps to itself. - for hostname, ip in mapping.items(): - generated_name = generated_hostname(ip) - if generated_name != hostname: - yield (hostname, generated_name) - - @classmethod - def get_static_mapping(cls, domain, networks, dns_ip): - """Return a generator mapping a network's generated fqdns to ips. - - The generated mapping is the mapping between the generated hostnames - and the IP addresses for all the possible IP addresses in zone. - The return type is a sequence of tuples, not a dictionary, so that we - don't have to generate the whole thing at once. - - :param domain: Zone's domain name. - :param networks: Sequence of :class:`netaddr.IPNetwork` describing - the networks whose IP-based generated host names should be mapped - to the corresponding IP addresses. - :param dns_ip: IP address for the zone's authoritative DNS server. - """ - ips = imap(unicode, chain.from_iterable(networks)) - static_mapping = ((generated_hostname(ip), ip) for ip in ips) - # Add A record for the name server's IP. - return chain([('%s.' 
% domain, dns_ip)], static_mapping) - - def write_config(self): - """Write the zone file.""" - self.write_zone_file( - self.target_path, self.make_parameters(), - { - 'mappings': { - 'CNAME': self.get_cname_mapping(self._mapping), - 'A': self.get_static_mapping( - self.domain, self._networks, self._dns_ip), - }, - }) - - -class DNSReverseZoneConfig(DNSZoneConfigBase): - """Writes reverse zone files. - - A reverse zone mapping contains "PTR" records, each mapping - reverse-notation IP addresses within a network to the matching generated - hostname. - """ - - def __init__(self, domain, **kwargs): - """See `DNSZoneConfigBase.__init__`. - - :param domain: The domain name of the forward zone. - :param serial: The serial to use in the zone file. This must increment - on each change. - :param network: The network that the mapping exists within. - :type network: :class:`netaddr.IPNetwork` - """ - self._network = kwargs.pop("network", None) - zone_name = self.compose_zone_name(self._network) - super(DNSReverseZoneConfig, self).__init__( - domain, zone_name=zone_name, **kwargs) - - @classmethod - def compose_zone_name(cls, network): - """Return the name of the reverse zone.""" - broadcast, netmask = network.broadcast, network.netmask - octets = broadcast.words[:netmask.words.count(255)] - return '%s.in-addr.arpa' % '.'.join(imap(unicode, reversed(octets))) - - @classmethod - def shortened_reversed_ip(cls, ip, num_bytes): - """Return reversed version of least-significant bytes of IP address. - - This is used when generating reverse zone files. - - >>> DNSReverseZoneConfig.shortened_reversed_ip('192.168.251.12', 1) - '12' - >>> DNSReverseZoneConfig.shortened_reversed_ip('10.99.0.3', 3) - '3.0.99' - - :param ip: IP address. Only its least-significant bytes will be used. - The bytes that only identify the network itself are ignored. - :type ip: :class:`netaddr.IPAddress` - :param num_bytes: Number of bytes from `ip` that should be included in - the result. 
- :return: A string similar to an IP address, consisting of only the - last `num_bytes` octets separated by dots, in reverse order: - starting with the least-significant octet and continuing towards - the most-significant. - :rtype: unicode - """ - # XXX JeroenVermeulen 2014-01-23: Does 0 bytes really make sense? - assert 0 <= num_bytes <= 4, ( - "num_bytes is %d (should be between 0 and 4 inclusive)." - % num_bytes) - significant_octets = islice(reversed(ip.words), num_bytes) - return '.'.join(imap(unicode, significant_octets)) - - @classmethod - def get_static_mapping(cls, domain, network): - """Return reverse mapping: shortened IPs to generated fqdns. - - The reverse generated mapping is the mapping between the IP addresses - and the generated hostnames for all the possible IP addresses in zone. - - :param domain: Zone's domain name. - :param network: Network whose IP addresses should be mapped to their - corresponding generated hostnames. - :type network: :class:`netaddr.IPNetwork` - """ - # Count how many octets are needed to address hosts within the network. - # If an octet in the netmask equals 255, that means that the - # corresponding octet will be equal between all hosts in the network. - # We don't need it in our shortened reversed addresses. - num_bytes = 4 - network.netmask.words.count(255) - return ( - ( - cls.shortened_reversed_ip(ip, num_bytes), - '%s.%s.' 
% (generated_hostname(ip), domain), - ) - for ip in network - ) - - def write_config(self): - """Write the zone file.""" - self.write_zone_file( - self.target_path, self.make_parameters(), - { - 'mappings': { - 'PTR': self.get_static_mapping(self.domain, self._network), - }, - }) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dns/testing.py maas-1.7.6+bzr3376/src/provisioningserver/dns/testing.py --- maas-1.5.4+bzr2294/src/provisioningserver/dns/testing.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dns/testing.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,49 @@ +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Test helpers for DNS.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "patch_dns_config_path", + "patch_dns_default_controls", + "patch_dns_rndc_port", +] + +import sys + +from fixtures import EnvironmentVariable + + +def patch_dns_config_path(testcase, config_dir=None): + """Set the DNS config dir to a temporary directory, and return its path.""" + fsenc = sys.getfilesystemencoding() + if config_dir is None: + config_dir = testcase.make_dir() + if isinstance(config_dir, unicode): + config_dir = config_dir.encode(fsenc) + testcase.useFixture( + EnvironmentVariable(b"MAAS_DNS_CONFIG_DIR", config_dir)) + testcase.useFixture( + EnvironmentVariable(b"MAAS_BIND_CONFIG_DIR", config_dir)) + return config_dir.decode(fsenc) + + +def patch_dns_rndc_port(testcase, port): + testcase.useFixture( + EnvironmentVariable(b"MAAS_DNS_RNDC_PORT", b"%d" % port)) + + +def patch_dns_default_controls(testcase, enable): + testcase.useFixture( + EnvironmentVariable( + b"MAAS_DNS_DEFAULT_CONTROLS", + b"1" if enable else b"0")) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dns/tests/test_config.py 
maas-1.7.6+bzr3376/src/provisioningserver/dns/tests/test_config.py --- maas-1.5.4+bzr2294/src/provisioningserver/dns/tests/test_config.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dns/tests/test_config.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,24 +14,17 @@ __metaclass__ = type __all__ = [] -from collections import ( - Iterable, - Sequence, - ) import errno import os.path import random from textwrap import dedent -from celery.app import app_or_default +from fixtures import EnvironmentVariable from maastesting.factory import factory from maastesting.fakemethod import FakeMethod from maastesting.testcase import MAASTestCase from mock import Mock -from netaddr import ( - IPAddress, - IPNetwork, - ) +from netaddr import IPNetwork from provisioningserver.dns import config from provisioningserver.dns.config import ( compose_config_path, @@ -39,8 +32,6 @@ DNSConfig, DNSConfigDirectoryMissing, DNSConfigFail, - DNSForwardZoneConfig, - DNSReverseZoneConfig, execute_rndc_command, extract_suggested_named_conf, generate_rndc, @@ -48,37 +39,139 @@ MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME, MAAS_NAMED_RNDC_CONF_NAME, MAAS_RNDC_CONF_NAME, + NAMED_CONF_OPTIONS, render_dns_template, report_missing_config_dir, set_up_options_conf, - setup_rndc, + set_up_rndc, uncomment_named_conf, ) -from provisioningserver.dns.utils import generated_hostname +from provisioningserver.dns.testing import ( + patch_dns_config_path, + patch_dns_default_controls, + ) +from provisioningserver.dns.zoneconfig import ( + DNSForwardZoneConfig, + DNSReverseZoneConfig, + ) +from provisioningserver.utils import locate_config +from provisioningserver.utils.isc import read_isc_file from testtools.matchers import ( + AllMatch, Contains, ContainsAll, EndsWith, + Equals, FileContains, FileExists, + Is, IsInstance, MatchesAll, - MatchesStructure, Not, + SamePath, StartsWith, ) from testtools.testcase import ExpectedException from twisted.python.filepath import FilePath -conf = 
app_or_default().conf - - -def patch_dns_config_path(testcase): - """Set the DNS config dir to a temporary directory, and return its path.""" - config_dir = testcase.make_dir() - testcase.patch(conf, 'DNS_CONFIG_DIR', config_dir) - return config_dir +NAMED_CONF_OPTIONS_CONTENTS = dedent("""\ + options { + forwarders { + 8.8.8.8; + 8.8.4.4; + }; + dnssec-validation auto; + allow-query { any; }; + allow-recursion { trusted; }; + allow-query-cache { trusted; }; + auth-nxdomain no; + listen-on-v6 { any; }; + }; + """) + +NAMED_CONF_OPTIONS_WITH_ALLOW_QUERY_CONTENTS = dedent("""\ + options { + forwarders { + 8.8.8.8; + 8.8.4.4; + }; + dnssec-validation auto; + allow-query { any; }; + auth-nxdomain no; + listen-on-v6 { any; }; + }; + """) + +NAMED_CONF_OPTIONS_NO_ALLOW_CONTENTS = dedent("""\ + options { + forwarders { + 8.8.8.8; + 8.8.4.4; + }; + dnssec-validation auto; + auth-nxdomain no; + listen-on-v6 { any; }; + }; + """) + + +class TestHelpers(MAASTestCase): + + def test_get_dns_config_dir_defaults_to_etc_bind_maas(self): + self.useFixture(EnvironmentVariable("MAAS_DNS_CONFIG_DIR")) + self.assertThat( + config.get_dns_config_dir(), MatchesAll( + SamePath(locate_config("../bind/maas")), + IsInstance(unicode), + )) + + def test_get_dns_config_dir_checks_environ_first(self): + directory = self.make_dir() + self.useFixture(EnvironmentVariable( + "MAAS_DNS_CONFIG_DIR", directory.encode("ascii"))) + self.assertThat( + config.get_dns_config_dir(), MatchesAll( + SamePath(directory), + IsInstance(unicode), + )) + + def test_get_bind_config_dir_defaults_to_etc_bind_maas(self): + self.useFixture(EnvironmentVariable("MAAS_BIND_CONFIG_DIR")) + self.assertThat( + config.get_bind_config_dir(), MatchesAll( + SamePath(locate_config("../bind")), + IsInstance(unicode), + )) + + def test_get_bind_config_dir_checks_environ_first(self): + directory = self.make_dir() + self.useFixture(EnvironmentVariable( + "MAAS_BIND_CONFIG_DIR", directory.encode("ascii"))) + self.assertThat( + 
config.get_bind_config_dir(), MatchesAll( + SamePath(directory), + IsInstance(unicode), + )) + + def test_get_dns_root_port_defaults_to_954(self): + self.useFixture(EnvironmentVariable("MAAS_DNS_RNDC_PORT")) + self.assertEqual(954, config.get_dns_rndc_port()) + + def test_get_dns_root_port_checks_environ_first(self): + port = factory.pick_port() + self.useFixture(EnvironmentVariable( + "MAAS_DNS_RNDC_PORT", b"%d" % port)) + self.assertEqual(port, config.get_dns_rndc_port()) + + def test_get_dns_default_controls_defaults_to_affirmative(self): + self.useFixture(EnvironmentVariable("MAAS_DNS_DEFAULT_CONTROLS")) + self.assertTrue(config.get_dns_default_controls()) + + def test_get_dns_default_controls_checks_environ_first(self): + self.useFixture( + EnvironmentVariable("MAAS_DNS_DEFAULT_CONTROLS", "0")) + self.assertFalse(config.get_dns_default_controls()) class TestRNDCUtilities(MAASTestCase): @@ -91,9 +184,9 @@ # named_content does not include any comment. self.assertNotIn('\n#', named_content) - def test_setup_rndc_writes_configurations(self): + def test_set_up_rndc_writes_configurations(self): dns_conf_dir = patch_dns_config_path(self) - setup_rndc() + set_up_rndc() expected = ( (MAAS_RNDC_CONF_NAME, '# Start of rndc.conf'), (MAAS_NAMED_RNDC_CONF_NAME, 'controls {')) @@ -104,13 +197,70 @@ def test_set_up_options_conf_writes_configuration(self): dns_conf_dir = patch_dns_config_path(self) - fake_dns = factory.getRandomIPAddress() - set_up_options_conf(upstream_dns=fake_dns) + ips = [factory.make_ip_address() for _ in range(3)] + set_up_options_conf(upstream_dns=ips) target_file = os.path.join( dns_conf_dir, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) self.assertThat( target_file, - FileContains(matcher=Contains(fake_dns))) + MatchesAll(*[ + FileContains(matcher=Contains(ip)) + for ip in ips + ]) + ) + + def test_set_up_options_conf_write_config_assumes_no_overrides(self): + dns_conf_dir = patch_dns_config_path(self) + set_up_options_conf() + target_file = os.path.join( + 
dns_conf_dir, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) + target = read_isc_file(target_file) + self.assertThat([ + target['allow-query']['any'], + target['allow-recursion']['trusted'], + target['allow-query-cache']['trusted'], + ], AllMatch(Equals(True))) + + def test_set_up_options_conf_write_config_allows_overrides(self): + dns_conf_dir = patch_dns_config_path(self) + factory.make_file( + location=dns_conf_dir, name=NAMED_CONF_OPTIONS, + contents=NAMED_CONF_OPTIONS_CONTENTS) + set_up_options_conf() + target_file = os.path.join( + dns_conf_dir, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) + target = read_isc_file(target_file) + self.assertThat([ + target.get('allow-query'), + target.get('allow-recursion'), + target.get('allow-query-cache'), + ], AllMatch(Is(None))) + + def test_set_up_options_conf_write_config_allows_zero_overrides(self): + dns_conf_dir = patch_dns_config_path(self) + factory.make_file( + location=dns_conf_dir, name=NAMED_CONF_OPTIONS, + contents=NAMED_CONF_OPTIONS_NO_ALLOW_CONTENTS) + set_up_options_conf() + target_file = os.path.join( + dns_conf_dir, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) + target = read_isc_file(target_file) + self.assertThat([ + target['allow-query']['any'], + target['allow-recursion']['trusted'], + target['allow-query-cache']['trusted'], + ], AllMatch(Equals(True))) + + def test_set_up_options_conf_write_config_allows_single_override(self): + dns_conf_dir = patch_dns_config_path(self) + factory.make_file( + location=dns_conf_dir, name=NAMED_CONF_OPTIONS, + contents=NAMED_CONF_OPTIONS_WITH_ALLOW_QUERY_CONTENTS) + set_up_options_conf() + target_file = os.path.join( + dns_conf_dir, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) + target = read_isc_file(target_file) + self.assertIsNone(target.get('allow-query')) def test_set_up_options_conf_handles_no_upstream_dns(self): dns_conf_dir = patch_dns_config_path(self) @@ -129,8 +279,8 @@ def test_rndc_config_includes_default_controls(self): dns_conf_dir = patch_dns_config_path(self) - self.patch(conf, 
'DNS_DEFAULT_CONTROLS', True) - setup_rndc() + patch_dns_default_controls(self, enable=True) + set_up_rndc() rndc_file = os.path.join(dns_conf_dir, MAAS_NAMED_RNDC_CONF_NAME) with open(rndc_file, "rb") as stream: conf_content = stream.read() @@ -140,14 +290,14 @@ recorder = FakeMethod() fake_dir = patch_dns_config_path(self) self.patch(config, 'call_and_check', recorder) - command = factory.getRandomString() + command = factory.make_string() execute_rndc_command([command]) rndc_conf_path = os.path.join(fake_dir, MAAS_RNDC_CONF_NAME) expected_command = ['rndc', '-c', rndc_conf_path, command] self.assertEqual((expected_command,), recorder.calls[0][0]) def test_extract_suggested_named_conf_extracts_section(self): - named_part = factory.getRandomString() + named_part = factory.make_string() # Actual rndc-confgen output, mildly mangled for testing purposes. # Note the awkward line break. The code works by matching that exact # line, so there's no leeway with the spacing. @@ -163,7 +313,7 @@ 'start_marker': ( 'Use with the following in named.conf, ' 'adjusting the allow list as needed:'), - 'rndc_part': factory.getRandomString(), + 'rndc_part': factory.make_string(), 'named_part': named_part, } # What you get is just the suggested named.conf that's embedded in @@ -182,7 +332,7 @@ %s # End of named.conf - """) % (factory.getRandomString(), factory.getRandomString()) + """) % (factory.make_string(), factory.make_string()) self.assertRaises( ValueError, extract_suggested_named_conf, rndc_config) @@ -233,7 +383,7 @@ def test_interpolates_parameters(self): param_name = factory.make_name('param', sep='_') - param_value = factory.getRandomString() + param_value = factory.make_string() self.assertEqual( "X %s Y" % param_value, render_dns_template( @@ -301,7 +451,7 @@ def test_write_config_errors_if_unexpected_exception(self): dnsconfig = DNSConfig() - exception = IOError(errno.EBUSY, factory.getRandomString()) + exception = IOError(errno.EBUSY, factory.make_string()) 
self.patch(config, 'atomic_write', Mock(side_effect=exception)) self.assertRaises(IOError, dnsconfig.write_config) @@ -309,7 +459,7 @@ # If DNSConfig is created with overwrite=False, it won't # overwrite an existing config file. target_dir = patch_dns_config_path(self) - random_content = factory.getRandomString() + random_content = factory.make_string() factory.make_file( location=target_dir, name=MAAS_NAMED_CONF_NAME, contents=random_content) @@ -331,12 +481,11 @@ def test_write_config_writes_config(self): target_dir = patch_dns_config_path(self) - domain = factory.getRandomString() + domain = factory.make_string() network = IPNetwork('192.168.0.3/24') - ip = factory.getRandomIPInNetwork(network) + ip = factory.pick_ip_in_network(network) forward_zone = DNSForwardZoneConfig( - domain, mapping={factory.getRandomString(): ip}, - networks=[network]) + domain, mapping={factory.make_string(): ip}) reverse_zone = DNSReverseZoneConfig(domain, network=network) dnsconfig = DNSConfig((forward_zone, reverse_zone)) dnsconfig.write_config() @@ -367,258 +516,7 @@ Contains(target_dir), Contains( 'include "%s/%s"' - % (conf.DNS_CONFIG_DIR, DNSConfig.target_file_name)))) - - -class TestDNSForwardZoneConfig(MAASTestCase): - """Tests for DNSForwardZoneConfig.""" - - def test_fields(self): - domain = factory.getRandomString() - serial = random.randint(1, 200) - hostname = factory.getRandomString() - network = factory.getRandomNetwork() - ip = factory.getRandomIPInNetwork(network) - mapping = {hostname: ip} - dns_zone_config = DNSForwardZoneConfig( - domain, serial=serial, mapping=mapping, networks=[network]) - self.assertThat( - dns_zone_config, - MatchesStructure.byEquality( - domain=domain, - serial=serial, - _mapping=mapping, - _networks=[network], - ) - ) - - def test_computes_dns_config_file_paths(self): - domain = factory.make_name('zone') - dns_zone_config = DNSForwardZoneConfig(domain) - self.assertEqual( - os.path.join(conf.DNS_CONFIG_DIR, 'zone.%s' % domain), - 
dns_zone_config.target_path) - - def test_forward_zone_get_cname_mapping_returns_iterator(self): - self.assertThat( - DNSForwardZoneConfig.get_cname_mapping( - {factory.make_name('host'): factory.getRandomIPAddress()}), - MatchesAll( - IsInstance(Iterable), Not(IsInstance(Sequence)))) - - def test_forward_zone_get_cname_mapping_skips_identity(self): - # We don't write cname records to map host names to themselves. - # Without this, a node would get an invalid cname record upon - # enlistment. - network = IPNetwork('10.250.99.0/24') - ip = factory.getRandomIPInNetwork(network) - generated_name = generated_hostname(ip) - self.assertNotIn( - generated_name, - dict(DNSForwardZoneConfig.get_cname_mapping({generated_name: ip}))) - - def test_get_static_mapping(self): - name = factory.getRandomString() - network = IPNetwork('192.12.0.1/30') - dns_ip = factory.getRandomIPInNetwork(network) - self.assertItemsEqual( - [ - ('%s.' % name, dns_ip), - (generated_hostname('192.12.0.0'), '192.12.0.0'), - (generated_hostname('192.12.0.1'), '192.12.0.1'), - (generated_hostname('192.12.0.2'), '192.12.0.2'), - (generated_hostname('192.12.0.3'), '192.12.0.3'), - ], - DNSForwardZoneConfig.get_static_mapping(name, [network], dns_ip)) - - def test_forward_zone_get_static_mapping_returns_iterator(self): - name = factory.getRandomString() - network = IPNetwork('192.12.0.1/30') - dns_ip = factory.getRandomIPInNetwork(network) - self.assertThat( - DNSForwardZoneConfig.get_static_mapping(name, [network], dns_ip), - MatchesAll( - IsInstance(Iterable), Not(IsInstance(Sequence)))) - - def test_get_static_mapping_multiple_networks(self): - name = factory.getRandomString() - networks = IPNetwork('11.11.11.11/31'), IPNetwork('22.22.22.22/31') - dns_ip = factory.getRandomIPInNetwork(networks[0]) - self.assertItemsEqual( - [ - ('%s.' 
% name, dns_ip), - (generated_hostname('11.11.11.10'), '11.11.11.10'), - (generated_hostname('11.11.11.11'), '11.11.11.11'), - (generated_hostname('22.22.22.22'), '22.22.22.22'), - (generated_hostname('22.22.22.23'), '22.22.22.23'), - ], - DNSForwardZoneConfig.get_static_mapping(name, networks, dns_ip), - ) - - def test_writes_dns_zone_config(self): - target_dir = patch_dns_config_path(self) - domain = factory.getRandomString() - hostname = factory.getRandomString() - network = factory.getRandomNetwork() - ip = factory.getRandomIPInNetwork(network) - dns_zone_config = DNSForwardZoneConfig( - domain, serial=random.randint(1, 100), - mapping={hostname: ip}, networks=[network]) - dns_zone_config.write_config() - self.assertThat( - os.path.join(target_dir, 'zone.%s' % domain), - FileContains( - matcher=ContainsAll( - [ - '%s IN CNAME %s' % (hostname, generated_hostname(ip)), - '%s IN A %s' % (generated_hostname(ip), ip), - ]))) - - def test_writes_dns_zone_config_with_NS_record(self): - target_dir = patch_dns_config_path(self) - network = factory.getRandomNetwork() - dns_ip = factory.getRandomIPAddress() - dns_zone_config = DNSForwardZoneConfig( - factory.getRandomString(), serial=random.randint(1, 100), - dns_ip=dns_ip, networks=[network]) - dns_zone_config.write_config() - self.assertThat( - os.path.join(target_dir, 'zone.%s' % dns_zone_config.domain), - FileContains( - matcher=ContainsAll( - [ - 'IN NS %s.' % dns_zone_config.domain, - '%s. 
IN A %s' % (dns_zone_config.domain, dns_ip), - ]))) - - def test_config_file_is_world_readable(self): - patch_dns_config_path(self) - network = factory.getRandomNetwork() - dns_zone_config = DNSForwardZoneConfig( - factory.getRandomString(), serial=random.randint(1, 100), - dns_ip=factory.getRandomIPAddress(), networks=[network]) - dns_zone_config.write_config() - filepath = FilePath(dns_zone_config.target_path) - self.assertTrue(filepath.getPermissions().other.read) - - -class TestDNSReverseZoneConfig(MAASTestCase): - """Tests for DNSReverseZoneConfig.""" - - def test_fields(self): - domain = factory.getRandomString() - serial = random.randint(1, 200) - network = factory.getRandomNetwork() - dns_zone_config = DNSReverseZoneConfig( - domain, serial=serial, network=network) - self.assertThat( - dns_zone_config, - MatchesStructure.byEquality( - domain=domain, - serial=serial, - _network=network, - ) - ) - - def test_shortened_reversed_ip_2(self): - self.assertEqual( - '3.0', - DNSReverseZoneConfig.shortened_reversed_ip( - IPAddress('192.156.0.3'), 2)) - - def test_shortened_reversed_ip_0(self): - self.assertEqual( - '', - DNSReverseZoneConfig.shortened_reversed_ip( - IPAddress('192.156.0.3'), 0)) - - def test_shortened_reversed_ip_4(self): - self.assertEqual( - '3.0.156.192', - DNSReverseZoneConfig.shortened_reversed_ip( - IPAddress('192.156.0.3'), 4)) - - def test_computes_dns_config_file_paths(self): - domain = factory.make_name('zone') - reverse_file_name = 'zone.168.192.in-addr.arpa' - dns_zone_config = DNSReverseZoneConfig( - domain, network=IPNetwork("192.168.0.0/22")) - self.assertEqual( - os.path.join(conf.DNS_CONFIG_DIR, reverse_file_name), - dns_zone_config.target_path) - - def test_reverse_data_slash_24(self): - # DNSReverseZoneConfig calculates the reverse data correctly for - # a /24 network. 
- domain = factory.make_name('zone') - network = IPNetwork('192.168.0.1/24') - dns_zone_config = DNSReverseZoneConfig(domain, network=network) - self.assertEqual( - '0.168.192.in-addr.arpa', - dns_zone_config.zone_name) - - def test_reverse_data_slash_22(self): - # DNSReverseZoneConfig calculates the reverse data correctly for - # a /22 network. - domain = factory.getRandomString() - network = IPNetwork('192.168.0.1/22') - dns_zone_config = DNSReverseZoneConfig(domain, network=network) - self.assertEqual( - '168.192.in-addr.arpa', - dns_zone_config.zone_name) - - def test_get_static_mapping_returns_iterator(self): - self.assertThat( - DNSReverseZoneConfig.get_static_mapping( - factory.make_name('zone'), - network=IPNetwork('192.12.0.1/30')), - MatchesAll( - IsInstance(Iterable), Not(IsInstance(Sequence)))) - - def test_get_static_mapping(self): - name = factory.getRandomString() - network = IPNetwork('192.12.0.1/30') - self.assertItemsEqual( - [ - ('0', '%s.' % generated_hostname('192.12.0.0', name)), - ('1', '%s.' % generated_hostname('192.12.0.1', name)), - ('2', '%s.' % generated_hostname('192.12.0.2', name)), - ('3', '%s.' % generated_hostname('192.12.0.3', name)), - ], - DNSReverseZoneConfig.get_static_mapping(name, network)) - - def test_writes_dns_zone_config_with_NS_record(self): - target_dir = patch_dns_config_path(self) - network = factory.getRandomNetwork() - dns_zone_config = DNSReverseZoneConfig( - factory.getRandomString(), serial=random.randint(1, 100), - network=network) - dns_zone_config.write_config() - self.assertThat( - os.path.join( - target_dir, 'zone.%s' % dns_zone_config.zone_name), - FileContains( - matcher=Contains('IN NS %s.' 
% dns_zone_config.domain))) - - def test_writes_reverse_dns_zone_config(self): - target_dir = patch_dns_config_path(self) - domain = factory.getRandomString() - network = IPNetwork('192.168.0.1/22') - dns_zone_config = DNSReverseZoneConfig( - domain, serial=random.randint(1, 100), network=network) - dns_zone_config.write_config() - reverse_file_name = 'zone.168.192.in-addr.arpa' - expected = Contains( - '10.0 IN PTR %s' % generated_hostname('192.168.0.10')) - self.assertThat( - os.path.join(target_dir, reverse_file_name), - FileContains(matcher=expected)) - - def test_reverse_config_file_is_world_readable(self): - patch_dns_config_path(self) - dns_zone_config = DNSReverseZoneConfig( - factory.getRandomString(), serial=random.randint(1, 100), - network=factory.getRandomNetwork()) - dns_zone_config.write_config() - filepath = FilePath(dns_zone_config.target_path) - self.assertTrue(filepath.getPermissions().other.read) + % ( + config.get_dns_config_dir(), + DNSConfig.target_file_name, + )))) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dns/tests/test_utils.py maas-1.7.6+bzr3376/src/provisioningserver/dns/tests/test_utils.py --- maas-1.5.4+bzr2294/src/provisioningserver/dns/tests/test_utils.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dns/tests/test_utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for miscellaneous helpers.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - - -from maastesting.factory import factory -from maastesting.testcase import MAASTestCase -from netaddr import IPAddress -from provisioningserver.dns.utils import generated_hostname - - -class TestUtilities(MAASTestCase): - - def test_generated_hostname_returns_hostname(self): - self.assertEqual( - '192-168-0-1', generated_hostname('192.168.0.1')) - - def test_generated_hostname_returns_hostname_plus_domain(self): - domain = factory.getRandomString() - self.assertEqual( - '192-168-0-1.%s' % domain, - generated_hostname('192.168.0.1', domain)) - - def test_generated_hostname_accepts_IPAddress(self): - address = IPAddress("12.34.56.78") - self.assertEqual("12-34-56-78", generated_hostname(address)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dns/tests/test_zoneconfig.py maas-1.7.6+bzr3376/src/provisioningserver/dns/tests/test_zoneconfig.py --- maas-1.5.4+bzr2294/src/provisioningserver/dns/tests/test_zoneconfig.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dns/tests/test_zoneconfig.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,700 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for BIND zone config generation.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from collections import ( + Iterable, + Sequence, + ) +import os.path +import random + +from maastesting.factory import factory +from maastesting.matchers import MockNotCalled +from maastesting.testcase import MAASTestCase +from netaddr import ( + IPAddress, + IPNetwork, + IPRange, + ) +from provisioningserver.dns.config import ( + get_dns_config_dir, + SRVRecord, + ) +from provisioningserver.dns.testing import patch_dns_config_path +from provisioningserver.dns.zoneconfig import ( + DNSForwardZoneConfig, + DNSReverseZoneConfig, + ) +from testtools.matchers import ( + Contains, + ContainsAll, + Equals, + FileContains, + HasLength, + IsInstance, + MatchesAll, + MatchesStructure, + Not, + ) +from twisted.python.filepath import FilePath + + +class TestDNSForwardZoneConfig(MAASTestCase): + """Tests for DNSForwardZoneConfig.""" + + def make_srv_record(self, service=None, port=None, target=None, + priority=None, weight=None): + if service is None: + service = '.'.join(factory.make_name('_') for _ in range(2)) + if port is None: + port = factory.pick_port() + if target is None: + target = factory.make_hostname() + if priority is None: + priority = factory.pick_port() + if weight is None: + weight = factory.pick_port() + return SRVRecord( + service=service, port=port, target=target, + priority=priority, weight=weight) + + def get_srv_item_output(self, srv_record): + return '%s %s %s %s.' 
% ( + srv_record.priority, + srv_record.weight, + srv_record.port, + srv_record.target, + ) + + def test_fields(self): + domain = factory.make_string() + serial = random.randint(1, 200) + hostname = factory.make_string() + network = factory.make_ipv4_network() + ip = factory.pick_ip_in_network(network) + mapping = {hostname: [ip]} + dns_zone_config = DNSForwardZoneConfig( + domain, serial=serial, mapping=mapping) + self.assertThat( + dns_zone_config, + MatchesStructure.byEquality( + domain=domain, + serial=serial, + _mapping=mapping, + ) + ) + + def test_computes_dns_config_file_paths(self): + domain = factory.make_name('zone') + dns_zone_config = DNSForwardZoneConfig(domain) + self.assertEqual( + os.path.join(get_dns_config_dir(), 'zone.%s' % domain), + dns_zone_config.target_path) + + def test_get_a_mapping_returns_ipv4_mapping(self): + name = factory.make_string() + network = IPNetwork('192.12.0.1/30') + dns_ip = factory.pick_ip_in_network(network) + ipv4_mapping = { + factory.make_name('host'): factory.make_ipv4_address(), + factory.make_name('host'): factory.make_ipv4_address(), + } + ipv6_mapping = { + factory.make_name('host'): factory.make_ipv6_address(), + factory.make_name('host'): factory.make_ipv6_address(), + } + combined_mapping = { + hostname: [ip] + for hostname, ip in (ipv4_mapping.items() + ipv6_mapping.items()) + } + expected = [('%s.' 
% name, dns_ip)] + ipv4_mapping.items() + self.assertItemsEqual( + expected, + DNSForwardZoneConfig.get_A_mapping(combined_mapping, name, dns_ip)) + + def test_get_aaaa_mapping_returns_ipv6_mapping(self): + name = factory.make_string() + network = IPNetwork('192.12.0.1/30') + dns_ip = factory.pick_ip_in_network(network) + ipv4_mapping = { + factory.make_name('host'): factory.make_ipv4_address(), + factory.make_name('host'): factory.make_ipv4_address(), + } + ipv6_mapping = { + factory.make_name('host'): factory.make_ipv6_address(), + factory.make_name('host'): factory.make_ipv6_address(), + } + combined_mapping = { + hostname: [ip] + for hostname, ip in (ipv4_mapping.items() + ipv6_mapping.items()) + } + self.assertItemsEqual( + ipv6_mapping.items(), + DNSForwardZoneConfig.get_AAAA_mapping( + combined_mapping, name, dns_ip)) + + def test_get_srv_mapping_returns_iterator(self): + srv = self.make_srv_record() + self.assertThat( + DNSForwardZoneConfig.get_srv_mapping([srv]), + MatchesAll( + IsInstance(Iterable), Not(IsInstance(Sequence)))) + + def test_get_srv_mapping_returns_correct_format(self): + srv = self.make_srv_record() + self.assertItemsEqual([ + (srv.service, self.get_srv_item_output(srv)), + ], + DNSForwardZoneConfig.get_srv_mapping([srv])) + + def test_get_srv_mapping_handles_ip_address_target(self): + target = factory.make_ipv4_address() + srv = self.make_srv_record(target=target) + item = self.get_srv_item_output(srv) + item = item.rstrip('.') + self.assertItemsEqual([ + (srv.service, item), + ], + DNSForwardZoneConfig.get_srv_mapping([srv])) + + def test_get_srv_mapping_returns_multiple(self): + srvs = [self.make_srv_record() for _ in range(3)] + entries = [] + for srv in srvs: + entries.append((srv.service, self.get_srv_item_output(srv))) + self.assertItemsEqual( + entries, DNSForwardZoneConfig.get_srv_mapping(srvs)) + + def test_writes_dns_zone_config(self): + target_dir = patch_dns_config_path(self) + domain = factory.make_string() + network = 
factory.make_ipv4_network() + dns_ip = factory.pick_ip_in_network(network) + ipv4_hostname = factory.make_name('host') + ipv4_ip = factory.pick_ip_in_network(network) + ipv6_hostname = factory.make_name('host') + ipv6_ip = factory.make_ipv6_address() + mapping = { + ipv4_hostname: [ipv4_ip], + ipv6_hostname: [ipv6_ip], + } + expected_generate_directives = ( + DNSForwardZoneConfig.get_GENERATE_directives(network)) + srv = self.make_srv_record() + dns_zone_config = DNSForwardZoneConfig( + domain, serial=random.randint(1, 100), + mapping=mapping, dns_ip=dns_ip, srv_mapping=[srv], + dynamic_ranges=[IPRange(network.first, network.last)]) + dns_zone_config.write_config() + self.assertThat( + os.path.join(target_dir, 'zone.%s' % domain), + FileContains( + matcher=ContainsAll( + [ + '%s IN SRV %s' % ( + srv.service, self.get_srv_item_output(srv)), + '%s IN A %s' % (ipv4_hostname, ipv4_ip), + '%s IN AAAA %s' % (ipv6_hostname, ipv6_ip), + ] + + [ + '$GENERATE %s %s IN A %s' % ( + iterator_values, reverse_dns, hostname) + for iterator_values, reverse_dns, hostname in + expected_generate_directives + ] + ) + ) + ) + + def test_writes_dns_zone_config_with_NS_record(self): + target_dir = patch_dns_config_path(self) + dns_ip = factory.make_ipv4_address() + dns_zone_config = DNSForwardZoneConfig( + factory.make_string(), serial=random.randint(1, 100), + dns_ip=dns_ip) + dns_zone_config.write_config() + self.assertThat( + os.path.join(target_dir, 'zone.%s' % dns_zone_config.domain), + FileContains( + matcher=ContainsAll( + [ + 'IN NS %s.' % dns_zone_config.domain, + '%s. 
IN A %s' % (dns_zone_config.domain, dns_ip), + ]))) + + def test_ignores_generate_directives_for_v6_dynamic_ranges(self): + patch_dns_config_path(self) + domain = factory.make_string() + network = factory.make_ipv4_network() + dns_ip = factory.pick_ip_in_network(network) + ipv4_hostname = factory.make_name('host') + ipv4_ip = factory.pick_ip_in_network(network) + ipv6_hostname = factory.make_name('host') + ipv6_ip = factory.make_ipv6_address() + ipv6_network = factory.make_ipv6_network() + dynamic_range = IPRange(ipv6_network.first, ipv6_network.last) + mapping = { + ipv4_hostname: [ipv4_ip], + ipv6_hostname: [ipv6_ip], + } + srv = self.make_srv_record() + dns_zone_config = DNSForwardZoneConfig( + domain, serial=random.randint(1, 100), + mapping=mapping, dns_ip=dns_ip, srv_mapping=[srv], + dynamic_ranges=[dynamic_range]) + get_generate_directives = self.patch( + dns_zone_config, 'get_GENERATE_directives') + dns_zone_config.write_config() + self.assertThat(get_generate_directives, MockNotCalled()) + + def test_config_file_is_world_readable(self): + patch_dns_config_path(self) + dns_zone_config = DNSForwardZoneConfig( + factory.make_string(), serial=random.randint(1, 100), + dns_ip=factory.make_ipv4_address()) + dns_zone_config.write_config() + filepath = FilePath(dns_zone_config.target_path) + self.assertTrue(filepath.getPermissions().other.read) + + +class TestDNSReverseZoneConfig(MAASTestCase): + """Tests for DNSReverseZoneConfig.""" + + def test_fields(self): + domain = factory.make_string() + serial = random.randint(1, 200) + network = factory.make_ipv4_network() + dns_zone_config = DNSReverseZoneConfig( + domain, serial=serial, network=network) + self.assertThat( + dns_zone_config, + MatchesStructure.byEquality( + domain=domain, + serial=serial, + _network=network, + ) + ) + + def test_computes_dns_config_file_paths(self): + domain = factory.make_name('zone') + reverse_file_name = 'zone.168.192.in-addr.arpa' + dns_zone_config = DNSReverseZoneConfig( + domain, 
network=IPNetwork("192.168.0.0/22")) + self.assertEqual( + os.path.join(get_dns_config_dir(), reverse_file_name), + dns_zone_config.target_path) + + def test_reverse_zone_file(self): + # DNSReverseZoneConfig calculates the reverse zone file name + # correctly for IPv4 and IPv6 networks. + expected = [ + # IPv4 networks. + (IPNetwork('192.168.0.1/22'), '168.192.in-addr.arpa'), + (IPNetwork('192.168.0.1/24'), '0.168.192.in-addr.arpa'), + # IPv6 networks. + (IPNetwork('3ffe:801::/32'), '1.0.8.0.e.f.f.3.ip6.arpa'), + (IPNetwork('2001:db8:0::/48'), '0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'), + ( + IPNetwork('2001:ba8:1f1:400::/56'), + '4.0.1.f.1.0.8.a.b.0.1.0.0.2.ip6.arpa' + ), + ( + IPNetwork('2610:8:6800:1::/64'), + '1.0.0.0.0.0.8.6.8.0.0.0.0.1.6.2.ip6.arpa', + ), + ( + IPNetwork('2001:ba8:1f1:400::/103'), + '0.0.0.0.0.0.0.0.0.0.0.4.0.1.f.1.0.8.a.b.0.1.0.0.2.ip6.arpa', + ), + + ] + results = [] + for network, _ in expected: + domain = factory.make_name('zone') + dns_zone_config = DNSReverseZoneConfig(domain, network=network) + results.append((network, dns_zone_config.zone_name)) + self.assertEqual(expected, results) + + def test_get_ptr_mapping(self): + name = factory.make_string() + network = IPNetwork('192.12.0.1/30') + hosts = { + factory.make_string(): factory.pick_ip_in_network(network), + factory.make_string(): factory.pick_ip_in_network(network), + } + expected = [ + (IPAddress(ip).reverse_dns, '%s.%s.' % (hostname, name)) + for hostname, ip in hosts.items() + ] + mapping = { + hostname: [ip] + for hostname, ip in hosts.items() + } + self.assertItemsEqual( + expected, + DNSReverseZoneConfig.get_PTR_mapping(mapping, name, network)) + + def test_get_ptr_mapping_drops_IPs_not_in_network(self): + name = factory.make_string() + network = IPNetwork('192.12.0.1/30') + in_network_mapping = { + factory.make_string(): factory.pick_ip_in_network(network), + factory.make_string(): factory.pick_ip_in_network(network), + } + expected = [ + (IPAddress(ip).reverse_dns, '%s.%s.' 
% (hostname, name)) + for hostname, ip in in_network_mapping.items() + ] + mapping = { + hostname: [ip] + for hostname, ip in in_network_mapping.items() + } + extra_mapping = { + factory.make_string(): ['192.50.0.2'], + factory.make_string(): ['192.70.0.2'], + } + mapping.update(extra_mapping) + self.assertItemsEqual( + expected, + DNSReverseZoneConfig.get_PTR_mapping(mapping, name, network)) + + def test_writes_dns_zone_config_with_NS_record(self): + target_dir = patch_dns_config_path(self) + network = factory.make_ipv4_network() + dns_zone_config = DNSReverseZoneConfig( + factory.make_string(), serial=random.randint(1, 100), + network=network) + dns_zone_config.write_config() + self.assertThat( + os.path.join( + target_dir, 'zone.%s' % dns_zone_config.zone_name), + FileContains( + matcher=Contains('IN NS %s.' % dns_zone_config.domain))) + + def test_writes_reverse_dns_zone_config(self): + target_dir = patch_dns_config_path(self) + domain = factory.make_string() + network = IPNetwork('192.168.0.1/22') + dynamic_network = IPNetwork('192.168.0.1/28') + dns_zone_config = DNSReverseZoneConfig( + domain, serial=random.randint(1, 100), network=network, + dynamic_ranges=[ + IPRange(dynamic_network.first, dynamic_network.last)]) + dns_zone_config.write_config() + reverse_file_name = 'zone.168.192.in-addr.arpa' + expected_generate_directives = dns_zone_config.get_GENERATE_directives( + dynamic_network, domain) + expected = ContainsAll( + [ + 'IN NS %s' % domain + ] + + [ + '$GENERATE %s %s IN PTR %s' % ( + iterator_values, reverse_dns, hostname) + for iterator_values, reverse_dns, hostname in + expected_generate_directives + ]) + self.assertThat( + os.path.join(target_dir, reverse_file_name), + FileContains(matcher=expected)) + + def test_ignores_generate_directives_for_v6_dynamic_ranges(self): + patch_dns_config_path(self) + domain = factory.make_string() + network = IPNetwork('192.168.0.1/22') + dynamic_network = IPNetwork("%s/64" % factory.make_ipv6_address()) + 
dns_zone_config = DNSReverseZoneConfig( + domain, serial=random.randint(1, 100), network=network, + dynamic_ranges=[ + IPRange(dynamic_network.first, dynamic_network.last)]) + get_generate_directives = self.patch( + dns_zone_config, 'get_GENERATE_directives') + dns_zone_config.write_config() + self.assertThat(get_generate_directives, MockNotCalled()) + + def test_reverse_config_file_is_world_readable(self): + patch_dns_config_path(self) + dns_zone_config = DNSReverseZoneConfig( + factory.make_string(), serial=random.randint(1, 100), + network=factory.make_ipv4_network()) + dns_zone_config.write_config() + filepath = FilePath(dns_zone_config.target_path) + self.assertTrue(filepath.getPermissions().other.read) + + +class TestDNSReverseZoneConfig_GetGenerateDirectives(MAASTestCase): + """Tests for `DNSReverseZoneConfig.get_GENERATE_directives()`.""" + + def test_excplicitly(self): + # The other tests in this TestCase rely on + # get_expected_generate_directives(), which is quite dense. Here + # we test get_GENERATE_directives() explicitly. 
+ ip_range = IPRange('192.168.0.55', '192.168.2.128') + expected_directives = [ + ("55-255", "$.0.168.192.in-addr.arpa.", "192-168-0-$.domain."), + ("0-255", "$.1.168.192.in-addr.arpa.", "192-168-1-$.domain."), + ("0-128", "$.2.168.192.in-addr.arpa.", "192-168-2-$.domain."), + ] + self.assertItemsEqual( + expected_directives, + DNSReverseZoneConfig.get_GENERATE_directives( + ip_range, domain="domain")) + + def get_expected_generate_directives(self, network, domain): + ip_parts = network.network.format().split('.') + relevant_ip_parts = ip_parts[:-2] + + first_address = IPAddress(network.first).format() + first_address_parts = first_address.split(".") + + if network.size < 256: + last_address = IPAddress(network.last).format() + iterator_low = int(first_address_parts[-1]) + iterator_high = last_address.split('.')[-1] + else: + iterator_low = 0 + iterator_high = 255 + + second_octet_offset = int(first_address_parts[-2]) + expected_generate_directives = [] + directives_needed = network.size / 256 + + if directives_needed == 0: + directives_needed = 1 + for num in range(directives_needed): + expected_address_base = "%s-%s" % tuple(relevant_ip_parts) + expected_address = "%s-%s-$" % ( + expected_address_base, num + second_octet_offset) + relevant_ip_parts.reverse() + expected_rdns_base = ( + "%s.%s.in-addr.arpa." % tuple(relevant_ip_parts)) + expected_rdns_template = "$.%s.%s" % ( + num + second_octet_offset, expected_rdns_base) + expected_generate_directives.append( + ( + "%s-%s" % (iterator_low, iterator_high), + expected_rdns_template, + "%s.%s." 
% (expected_address, domain) + )) + relevant_ip_parts.reverse() + return expected_generate_directives + + def test_returns_single_entry_for_slash_24_network(self): + network = IPNetwork("%s/24" % factory.make_ipv4_address()) + domain = factory.make_string() + expected_generate_directives = self.get_expected_generate_directives( + network, domain) + directives = DNSReverseZoneConfig.get_GENERATE_directives( + network, domain) + self.expectThat(directives, HasLength(1)) + self.assertItemsEqual(expected_generate_directives, directives) + + def test_returns_single_entry_for_tiny_network(self): + network = IPNetwork("%s/28" % factory.make_ipv4_address()) + domain = factory.make_string() + + expected_generate_directives = self.get_expected_generate_directives( + network, domain) + directives = DNSReverseZoneConfig.get_GENERATE_directives( + network, domain) + self.expectThat(directives, HasLength(1)) + self.assertItemsEqual(expected_generate_directives, directives) + + def test_returns_single_entry_for_weird_small_range(self): + ip_range = IPRange('10.0.0.1', '10.0.0.255') + domain = factory.make_string() + directives = DNSReverseZoneConfig.get_GENERATE_directives( + ip_range, domain) + self.expectThat(directives, HasLength(1)) + + def test_dtrt_for_larger_networks(self): + # For every other network size that we're not explicitly + # testing here, + # DNSReverseZoneConfig.get_GENERATE_directives() will return + # one GENERATE directive for every 255 addresses in the network. 
+ for prefixlen in range(23, 17): + network = IPNetwork( + "%s/%s" % (factory.make_ipv4_address(), prefixlen)) + domain = factory.make_string() + directives = DNSReverseZoneConfig.get_GENERATE_directives( + network, domain) + self.expectThat(directives, HasLength(network.size / 256)) + + def test_returns_two_entries_for_slash_23_network(self): + network = IPNetwork(factory.make_ipv4_network(slash=23)) + domain = factory.make_string() + + expected_generate_directives = self.get_expected_generate_directives( + network, domain) + directives = DNSReverseZoneConfig.get_GENERATE_directives( + network, domain) + self.expectThat(directives, HasLength(2)) + self.assertItemsEqual(expected_generate_directives, directives) + + def test_ignores_network_larger_than_slash_16(self): + network = IPNetwork("%s/15" % factory.make_ipv4_address()) + self.assertEqual( + [], + DNSReverseZoneConfig.get_GENERATE_directives( + network, factory.make_string())) + + def test_ignores_networks_that_span_slash_16s(self): + # If the upper and lower bounds of a range span two /16 networks + # (but contain between them no more than 65536 addresses), + # get_GENERATE_directives() will return early + ip_range = IPRange('10.0.0.55', '10.1.0.54') + directives = DNSReverseZoneConfig.get_GENERATE_directives( + ip_range, factory.make_string()) + self.assertEqual([], directives) + + def test_sorts_output_by_hostname(self): + network = IPNetwork("10.0.0.1/23") + domain = factory.make_string() + + expected_hostname = "10-0-%s-$." + domain + "." + expected_rdns = "$.%s.0.10.in-addr.arpa." 
+ + directives = list(DNSReverseZoneConfig.get_GENERATE_directives( + network, domain)) + self.expectThat( + directives[0], Equals( + ("0-255", expected_rdns % "0", expected_hostname % "0"))) + self.expectThat( + directives[1], Equals( + ("0-255", expected_rdns % "1", expected_hostname % "1"))) + + +class TestDNSForwardZoneConfig_GetGenerateDirectives(MAASTestCase): + """Tests for `DNSForwardZoneConfig.get_GENERATE_directives()`.""" + + def test_excplicitly(self): + # The other tests in this TestCase rely on + # get_expected_generate_directives(), which is quite dense. Here + # we test get_GENERATE_directives() explicitly. + ip_range = IPRange('192.168.0.55', '192.168.2.128') + expected_directives = [ + ("55-255", "192-168-0-$", "192.168.0.$"), + ("0-255", "192-168-1-$", "192.168.1.$"), + ("0-128", "192-168-2-$", "192.168.2.$"), + ] + self.assertItemsEqual( + expected_directives, + DNSForwardZoneConfig.get_GENERATE_directives(ip_range)) + + def get_expected_generate_directives(self, network): + ip_parts = network.network.format().split('.') + ip_parts[-1] = "$" + expected_hostname = "%s" % "-".join(ip_parts) + expected_address = ".".join(ip_parts) + + first_address = IPAddress(network.first).format() + first_address_parts = first_address.split(".") + last_address = IPAddress(network.last).format() + last_address_parts = last_address.split(".") + + if network.size < 256: + iterator_low = int(first_address_parts[-1]) + iterator_high = int(last_address_parts[-1]) + else: + iterator_low = 0 + iterator_high = 255 + + expected_iterator_values = "%s-%s" % (iterator_low, iterator_high) + + directives_needed = network.size / 256 + if directives_needed == 0: + directives_needed = 1 + expected_directives = [] + for num in range(directives_needed): + ip_parts[-2] = unicode(num + int(ip_parts[-2])) + expected_address = ".".join(ip_parts) + expected_hostname = "%s" % "-".join(ip_parts) + expected_directives.append( + ( + expected_iterator_values, + expected_hostname, + 
expected_address + )) + return expected_directives + + def test_returns_single_entry_for_slash_24_network(self): + network = IPNetwork("%s/24" % factory.make_ipv4_address()) + expected_directives = self.get_expected_generate_directives(network) + directives = DNSForwardZoneConfig.get_GENERATE_directives( + network) + self.expectThat(directives, HasLength(1)) + self.assertItemsEqual(expected_directives, directives) + + def test_returns_single_entry_for_tiny_network(self): + network = IPNetwork("%s/31" % factory.make_ipv4_address()) + + expected_directives = self.get_expected_generate_directives(network) + directives = DNSForwardZoneConfig.get_GENERATE_directives( + network) + self.assertEqual(1, len(expected_directives)) + self.assertItemsEqual(expected_directives, directives) + + def test_returns_two_entries_for_slash_23_network(self): + network = IPNetwork("%s/23" % factory.make_ipv4_address()) + + expected_directives = self.get_expected_generate_directives(network) + directives = DNSForwardZoneConfig.get_GENERATE_directives( + network) + self.assertEqual(2, len(expected_directives)) + self.assertItemsEqual(expected_directives, directives) + + def test_dtrt_for_larger_networks(self): + # For every other network size that we're not explicitly + # testing here, + # DNSForwardZoneConfig.get_GENERATE_directives() will return + # one GENERATE directive for every 255 addresses in the network. 
+ for prefixlen in range(23, 16): + network = IPNetwork( + "%s/%s" % (factory.make_ipv4_address(), prefixlen)) + directives = DNSForwardZoneConfig.get_GENERATE_directives( + network) + self.assertIsEqual(network.size / 256, len(directives)) + + def test_ignores_network_larger_than_slash_16(self): + network = IPNetwork("%s/15" % factory.make_ipv4_address()) + self.assertEqual( + [], + DNSForwardZoneConfig.get_GENERATE_directives(network)) + + def test_ignores_networks_that_span_slash_16s(self): + # If the upper and lower bounds of a range span two /16 networks + # (but contain between them no more than 65536 addresses), + # get_GENERATE_directives() will return early + ip_range = IPRange('10.0.0.55', '10.1.0.54') + directives = DNSForwardZoneConfig.get_GENERATE_directives( + ip_range) + self.assertEqual([], directives) + + def test_sorts_output(self): + network = IPNetwork("10.0.0.0/23") + + expected_hostname = "10-0-%s-$" + expected_address = "10.0.%s.$" + + directives = list(DNSForwardZoneConfig.get_GENERATE_directives( + network)) + self.expectThat(len(directives), Equals(2)) + self.expectThat( + directives[0], Equals( + ("0-255", expected_hostname % "0", expected_address % "0"))) + self.expectThat( + directives[1], Equals( + ("0-255", expected_hostname % "1", expected_address % "1"))) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dns/utils.py maas-1.7.6+bzr3376/src/provisioningserver/dns/utils.py --- maas-1.5.4+bzr2294/src/provisioningserver/dns/utils.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dns/utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Network utilities.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - 'generated_hostname', - ] - - -def generated_hostname(ip, domain=None): - """Return the auto-generated hostname for the give IP. - - >>> generated_hostname('192.168.0.1') - '192-168-0-1' - >>> generated_hostname('192.168.0.1', 'mydomain.com') - '192-168-0-1.mydomain.com' - """ - hostname = unicode(ip).replace('.', '-') - if domain is not None: - return '%s.%s' % (hostname, domain) - else: - return hostname diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/dns/zoneconfig.py maas-1.7.6+bzr3376/src/provisioningserver/dns/zoneconfig.py --- maas-1.5.4+bzr2294/src/provisioningserver/dns/zoneconfig.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/dns/zoneconfig.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,431 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Classes for generating BIND zone config files.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'DNSForwardZoneConfig', + 'DNSReverseZoneConfig', + ] + + +from abc import ABCMeta +from datetime import datetime +from itertools import chain +import math + +from netaddr import ( + IPAddress, + IPNetwork, + spanning_cidr, + ) +from netaddr.core import AddrFormatError +from provisioningserver.dns.config import ( + compose_config_path, + render_dns_template, + report_missing_config_dir, + ) +from provisioningserver.utils.fs import incremental_write +from provisioningserver.utils.network import ( + intersect_iprange, + ip_range_within_network, + ) + + +def get_fqdn_or_ip_address(target): + """Returns the ip address is target is a valid ip address, otherwise + returns the target with appended '.' 
if missing.""" + try: + return IPAddress(target).format() + except AddrFormatError: + return target.rstrip('.') + '.' + + +def enumerate_mapping(mapping): + """Generate `(hostname, ip)` tuples from `mapping`. + + :param mapping: A dict mapping host names to lists of IP addresses. + """ + for hostname, ips in mapping.viewitems(): + for ip in ips: + yield hostname, ip + + +def get_details_for_ip_range(ip_range): + """For a given IPRange, return all subnets, a useable prefix and the + reverse DNS suffix calculated from that IP range. + + :return: A tuple of: + All subnets of /24 (or smaller if there is no /24 subnet to be + found) in `ip_range`. + A prefix made from the first two octets in the range. + A RDNS suffix calculated from the first two octets in the range. + """ + # Calculate a spanning network for the range above. There are + # 256 /24 networks in a /16, so that's the most /24s we're going + # to have to deal with; this matters later on when we iterate + # through the /24s within this network. + cidr = spanning_cidr(ip_range) + subnets = cidr.subnet(max(24, cidr.prefixlen)) + + # Split the spanning network into /24 subnets, then see if they fall + # entirely within the original network range, partially, or not at + # all. + intersecting_subnets = [] + for subnet in subnets: + intersect = intersect_iprange(subnet, ip_range) + if intersect is None: + # The subnet does not fall within the original network. + pass + else: + # The subnet falls partially within the original network, so print + # out a $GENERATE expression for a subset of the /24. + intersecting_subnets.append(intersect) + + octet_one = (cidr.value & 0xff000000) >> 24 + octet_two = (cidr.value & 0x00ff0000) >> 16 + + # The first two octets of the network range formatted in the + # usual dotted-quad style. We can precalculate the start of any IP + # address in the range because we're only ever dealing with /16 + # networks and smaller. 
+ prefix = "%d.%d" % (octet_one, octet_two) + + # Similarly, we can calculate what the reverse DNS suffix is going + # to look like. + rdns_suffix = "%d.%d.in-addr.arpa." % (octet_two, octet_one) + return intersecting_subnets, prefix, rdns_suffix + + +class DNSZoneConfigBase: + """Base class for zone writers.""" + + __metaclass__ = ABCMeta + + template_file_name = 'zone.template' + + def __init__(self, domain, zone_name, serial=None): + """ + :param domain: The domain name of the forward zone. + :param zone_name: Fully-qualified zone name. + :param serial: The serial to use in the zone file. This must increment + on each change. + """ + self.domain = domain + self.zone_name = zone_name + self.serial = serial + self.target_path = compose_config_path('zone.%s' % self.zone_name) + + def make_parameters(self): + """Return a dict of the common template parameters.""" + return { + 'domain': self.domain, + 'serial': self.serial, + 'modified': unicode(datetime.today()), + } + + @classmethod + def write_zone_file(cls, output_file, *parameters): + """Write a zone file based on the zone file template. + + There is a subtlety with zone files: their filesystem timestamp must + increase with every rewrite. Some filesystems (ext3?) only seem to + support a resolution of one second, and so this method may set an + unexpected modification time in order to maintain that property. + """ + content = render_dns_template(cls.template_file_name, *parameters) + with report_missing_config_dir(): + incremental_write(content, output_file, mode=0644) + + +class DNSForwardZoneConfig(DNSZoneConfigBase): + """Writes forward zone files. + + A forward zone config contains two kinds of mappings: "A" records map all + possible IP addresses within each of its networks to generated hostnames + based on those addresses. "CNAME" records map configured hostnames to the + matching generated IP hostnames. An additional "A" record maps the domain + to the name server itself. 
+ """ + + def __init__(self, domain, **kwargs): + """See `DNSZoneConfigBase.__init__`. + + :param domain: The domain name of the forward zone. + :param serial: The serial to use in the zone file. This must increment + on each change. + :param dns_ip: The IP address of the DNS server authoritative for this + zone. + :param mapping: A hostname:ip-addresses mapping for all known hosts in + the zone. They will be mapped as A records. + :param srv_mapping: Set of SRVRecord mappings. + """ + self._dns_ip = kwargs.pop('dns_ip', None) + self._mapping = kwargs.pop('mapping', {}) + self._network = kwargs.pop('network', None) + self._dynamic_ranges = kwargs.pop('dynamic_ranges', []) + self._srv_mapping = kwargs.pop('srv_mapping', []) + super(DNSForwardZoneConfig, self).__init__( + domain, zone_name=domain, **kwargs) + + @classmethod + def get_mapping(cls, mapping, domain, dns_ip): + """Return a generator mapping hostnames to IP addresses. + + This includes the record for the name server's IP. + + :param mapping: A dict mapping host names to lists of IP addresses. + :param domain: Zone's domain name. + :param dns_ip: IP address for the zone's authoritative DNS server. + :return: A generator of tuples: (host name, IP address). + """ + return chain( + [('%s.' % domain, dns_ip)], + enumerate_mapping(mapping)) + + @classmethod + def get_A_mapping(cls, mapping, domain, dns_ip): + """Return a generator mapping hostnames to IP addresses for all + the IPv4 addresses in `mapping`. + + The returned mapping is meant to be used to generate A records in + the forward zone file. + + This includes the A record for the name server's IP. + :param mapping: A dict mapping host names to lists of IP addresses. + :param domain: Zone's domain name. + :param dns_ip: IP address for the zone's authoritative DNS server. + :return: A generator of tuples: (host name, IP address). 
+ """ + mapping = cls.get_mapping(mapping, domain, dns_ip) + return (item for item in mapping if IPAddress(item[1]).version == 4) + + @classmethod + def get_AAAA_mapping(cls, mapping, domain, dns_ip): + """Return a generator mapping hostnames to IP addresses for all + the IPv6 addresses in `mapping`. + + The returned mapping is meant to be used to generate AAAA records + in the forward zone file. + + :param mapping: A dict mapping host names to lists of IP addresses. + :param domain: Zone's domain name. + :param dns_ip: IP address for the zone's authoritative DNS server. + :return: A generator of tuples: (host name, IP address). + """ + mapping = cls.get_mapping(mapping, domain, dns_ip) + return (item for item in mapping if IPAddress(item[1]).version == 6) + + @classmethod + def get_srv_mapping(cls, mappings): + """Return a generator mapping srv entries to hostnames. + + :param mappings: Set of SRVRecord. + :return: A generator of tuples: + (service, 'priority weight port target'). + """ + for record in mappings: + target = get_fqdn_or_ip_address(record.target) + item = '%s %s %s %s' % ( + record.priority, + record.weight, + record.port, + target) + yield (record.service, item) + + @classmethod + def get_GENERATE_directives(cls, dynamic_range): + """Return the GENERATE directives for the forward zone of a network. + """ + slash_16 = IPNetwork("%s/16" % IPAddress(dynamic_range.first)) + if (dynamic_range.size > 256 ** 2 or + not ip_range_within_network(dynamic_range, slash_16)): + # We can't issue a sane set of $GENERATEs for any network + # larger than a /16, or for one that spans two /16s, so we + # don't try. + return [] + + generate_directives = set() + subnets, prefix, _ = get_details_for_ip_range(dynamic_range) + for subnet in subnets: + iterator = "%d-%d" % ( + (subnet.first & 0x000000ff), + (subnet.last & 0x000000ff)) + + hostname = "%s-%d-$" % ( + prefix.replace('.', '-'), + # Calculate what the third quad (i.e. 
10.0.X.1) value should + # be for this subnet. + (subnet.first & 0x0000ff00) >> 8, + ) + + ip_address = "%s.%d.$" % ( + prefix, + (subnet.first & 0x0000ff00) >> 8) + generate_directives.add((iterator, hostname, ip_address)) + + return sorted( + generate_directives, key=lambda directive: directive[2]) + + def write_config(self): + """Write the zone file.""" + # Create GENERATE directives for IPv4 ranges. + generate_directives = list( + chain.from_iterable( + self.get_GENERATE_directives(dynamic_range) + for dynamic_range in self._dynamic_ranges + if dynamic_range.version == 4 + )) + self.write_zone_file( + self.target_path, self.make_parameters(), + { + 'mappings': { + 'SRV': self.get_srv_mapping( + self._srv_mapping), + 'A': self.get_A_mapping( + self._mapping, self.domain, self._dns_ip), + 'AAAA': self.get_AAAA_mapping( + self._mapping, self.domain, self._dns_ip), + }, + 'generate_directives': { + 'A': generate_directives, + } + }) + + +class DNSReverseZoneConfig(DNSZoneConfigBase): + """Writes reverse zone files. + + A reverse zone mapping contains "PTR" records, each mapping + reverse-notation IP addresses within a network to the matching generated + hostname. + """ + + def __init__(self, domain, **kwargs): + """See `DNSZoneConfigBase.__init__`. + + :param domain: The domain name of the forward zone. + :param serial: The serial to use in the zone file. This must increment + on each change. + :param mapping: A hostname:ips mapping for all known hosts in + the reverse zone. They will be mapped as PTR records. IP + addresses not in `network` will be dropped. + :param network: The network that the mapping exists within. 
+ :type network: :class:`netaddr.IPNetwork` + """ + self._mapping = kwargs.pop('mapping', {}) + self._network = kwargs.pop("network", None) + self._dynamic_ranges = kwargs.pop('dynamic_ranges', []) + zone_name = self.compose_zone_name(self._network) + super(DNSReverseZoneConfig, self).__init__( + domain, zone_name=zone_name, **kwargs) + + @classmethod + def compose_zone_name(cls, network): + """Return the name of the reverse zone.""" + # Generate the name of the reverse zone file: + # Use netaddr's reverse_dns() to get the reverse IP name + # of the first IP address in the network and then drop the first + # octets of that name (i.e. drop the octets that will be specified in + # the zone file). + first = IPAddress(network.first) + if first.version == 6: + # IPv6. + # Use float division and ceil to cope with network sizes that + # are not divisible by 4. + rest_limit = int(math.ceil((128 - network.prefixlen) / 4.)) + else: + # IPv4. + # Use float division and ceil to cope with splits not done on + # octets boundaries. + rest_limit = int(math.ceil((32 - network.prefixlen) / 8.)) + reverse_name = first.reverse_dns.split('.', rest_limit)[-1] + # Strip off trailing '.'. + return reverse_name[:-1] + + @classmethod + def get_PTR_mapping(cls, mapping, domain, network): + """Return reverse mapping: reverse IPs to hostnames. + + The reverse generated mapping is the mapping between the reverse + IP addresses and the hostnames for all the IP addresses in the given + `mapping`. + + The returned mapping is meant to be used to generate PTR records in + the reverse zone file. + + :param mapping: A hostname:ip-addresses mapping for all known hosts in + the reverse zone. + :param domain: Zone's domain name. + :param network: Zone's network. + :type network: :class:`netaddr.IPNetwork` + """ + return ( + ( + IPAddress(ip).reverse_dns, + '%s.%s.' % (hostname, domain), + ) + for hostname, ip in enumerate_mapping(mapping) + # Filter out the IP addresses that are not in `network`. 
+ if IPAddress(ip) in network + ) + + @classmethod + def get_GENERATE_directives(cls, dynamic_range, domain): + """Return the GENERATE directives for the reverse zone of a network.""" + slash_16 = IPNetwork("%s/16" % IPAddress(dynamic_range.first)) + if (dynamic_range.size > 256 ** 2 or + not ip_range_within_network(dynamic_range, slash_16)): + # We can't issue a sane set of $GENERATEs for any network + # larger than a /16, or for one that spans two /16s, so we + # don't try. + return [] + + generate_directives = set() + subnets, prefix, rdns_suffix = get_details_for_ip_range(dynamic_range) + for subnet in subnets: + iterator = "%d-%d" % ( + (subnet.first & 0x000000ff), + (subnet.last & 0x000000ff)) + hostname = "%s-%d-$" % ( + prefix.replace('.', '-'), + (subnet.first & 0x0000ff00) >> 8) + rdns = "$.%d.%s" % ( + (subnet.first & 0x0000ff00) >> 8, + rdns_suffix) + generate_directives.add( + (iterator, rdns, "%s.%s." % (hostname, domain))) + + return sorted( + generate_directives, key=lambda directive: directive[2]) + + def write_config(self): + """Write the zone file.""" + # Create GENERATE directives for IPv4 ranges. + generate_directives = list( + chain.from_iterable( + self.get_GENERATE_directives(dynamic_range, self.domain) + for dynamic_range in self._dynamic_ranges + if dynamic_range.version == 4 + )) + self.write_zone_file( + self.target_path, self.make_parameters(), + { + 'mappings': { + 'PTR': self.get_PTR_mapping( + self._mapping, self.domain, self._network), + }, + 'generate_directives': { + 'PTR': generate_directives, + } + } + ) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/driver/__init__.py maas-1.7.6+bzr3376/src/provisioningserver/driver/__init__.py --- maas-1.5.4+bzr2294/src/provisioningserver/driver/__init__.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/driver/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,163 +0,0 @@ -# Copyright 2014 Canonical Ltd. 
This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Hardware Drivers.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - "Architecture", - "ArchitectureRegistry", - "BootResource", - ] - -from abc import ( - ABCMeta, - abstractmethod, - ) - -from provisioningserver.power_schema import JSON_POWER_TYPE_PARAMETERS -from provisioningserver.utils.registry import Registry - - -class Architecture: - - def __init__(self, name, description, pxealiases=None, - kernel_options=None): - """Represents an architecture in the driver context. - - :param name: The architecture name as used in MAAS. - arch/subarch or just arch. - :param description: The human-readable description for the - architecture. - :param pxealiases: The optional list of names used if the - hardware uses a different name when requesting its bootloader. - :param kernel_options: The optional list of kernel options for this - architecture. Anything supplied here supplements the options - provided by MAAS core. - """ - if pxealiases is None: - pxealiases = () - self.name = name - self.description = description - self.pxealiases = pxealiases - self.kernel_options = kernel_options - - -class BootResource: - """Abstraction of ephemerals and pxe resources required for a hardware - driver. - - This resource is responsible for importing and reporting on - what is potentially available in relation to a cluster controller. - """ - - __metaclass__ = ABCMeta - - def __init__(self, name): - self.name = name - - @abstractmethod - def import_resources(self, at_location, filter=None): - """Import the specified resources. - - :param at_location: URL to a Simplestreams index or a local path - to a directory containing boot resources. - :param filter: A simplestreams filter. - e.g. 
"release=trusty label=beta-2 arch=amd64" - This is ignored if the location is a local path, all resources - at the location will be imported. - TBD: How to provide progress information. - """ - - @abstractmethod - def describe_resources(self, at_location): - """Enumerate all the boot resources. - - :param at_location: URL to a Simplestreams index or a local path - to a directory containing boot resources. - - :return: a list of dictionaries describing the available resources, - which will need to be imported so the driver can use them. - [ - { - "release": "trusty", - "arch": "amd64", - "label": "beta-2", - "size": 12344556, - } - , - ] - """ - - -class HardwareDiscoverContext: - - __metaclass__ = ABCMeta - - @abstractmethod - def startDiscovery(self): - """TBD""" - - @abstractmethod - def stopDiscovery(self): - """TBD""" - - -class ArchitectureRegistry(Registry): - """Registry for architecture classes.""" - - @classmethod - def get_by_pxealias(cls, alias): - for _, arch in cls: - if alias in arch.pxealiases: - return arch - return None - - -class BootResourceRegistry(Registry): - """Registry for boot resource classes.""" - - -class PowerTypeRegistry(Registry): - """Registry for power type classes.""" - - -builtin_architectures = [ - Architecture(name="i386/generic", description="i386"), - Architecture(name="amd64/generic", description="amd64"), - Architecture( - name="arm64/generic", description="arm64/generic", - pxealiases=["arm"]), - Architecture( - name="arm64/xgene-uboot", description="arm64/xgene-uboot", - pxealiases=["arm"]), - Architecture( - name="armhf/highbank", description="armhf/highbank", - pxealiases=["arm"], kernel_options=["console=ttyAMA0"]), - Architecture( - name="armhf/generic", description="armhf/generic", - pxealiases=["arm"], kernel_options=["console=ttyAMA0"]), - # PPC64EL needs a rootdelay for PowerNV. The disk controller - # in the hardware, takes a little bit longer to come up then - # the initrd wants to wait. 
Set this to 60 seconds, just to - # give the booting machine enough time. This doesn't slow down - # the booting process, it just increases the timeout. - Architecture( - name="ppc64el/generic", description="ppc64el", - kernel_options=['rootdelay=60']), -] -for arch in builtin_architectures: - ArchitectureRegistry.register_item(arch.name, arch) - - -builtin_power_types = JSON_POWER_TYPE_PARAMETERS -for power_type in builtin_power_types: - PowerTypeRegistry.register_item(power_type['name'], power_type) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/driver/tests/test_registries.py maas-1.7.6+bzr3376/src/provisioningserver/driver/tests/test_registries.py --- maas-1.5.4+bzr2294/src/provisioningserver/driver/tests/test_registries.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/driver/tests/test_registries.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,76 +0,0 @@ -# Copyright 2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for the driver registries.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from maastesting.testcase import MAASTestCase -from mock import sentinel -from provisioningserver.driver import ( - Architecture, - ArchitectureRegistry, - BootResourceRegistry, - PowerTypeRegistry, - ) -from provisioningserver.utils.testing import RegistryFixture - - -class TestRegistries(MAASTestCase): - - def setUp(self): - super(TestRegistries, self).setUp() - # Ensure the global registry is empty for each test run. 
- self.useFixture(RegistryFixture()) - - def test_bootresource_registry(self): - self.assertItemsEqual([], BootResourceRegistry) - BootResourceRegistry.register_item("resource", sentinel.resource) - self.assertIn( - sentinel.resource, - (item for name, item in BootResourceRegistry)) - - def test_architecture_registry(self): - self.assertItemsEqual([], ArchitectureRegistry) - ArchitectureRegistry.register_item("resource", sentinel.resource) - self.assertIn( - sentinel.resource, - (item for name, item in ArchitectureRegistry)) - - def test_get_by_pxealias_returns_valid_arch(self): - arch1 = Architecture( - name="arch1", description="arch1", - pxealiases=["archibald", "reginald"]) - arch2 = Architecture( - name="arch2", description="arch2", - pxealiases=["fake", "foo"]) - ArchitectureRegistry.register_item("arch1", arch1) - ArchitectureRegistry.register_item("arch2", arch2) - self.assertEqual( - arch1, ArchitectureRegistry.get_by_pxealias("archibald")) - - def test_get_by_pxealias_returns_None_if_none_matching(self): - arch1 = Architecture( - name="arch1", description="arch1", - pxealiases=["archibald", "reginald"]) - arch2 = Architecture(name="arch2", description="arch2") - ArchitectureRegistry.register_item("arch1", arch1) - ArchitectureRegistry.register_item("arch2", arch2) - self.assertEqual( - None, ArchitectureRegistry.get_by_pxealias("stinkywinky")) - - def test_power_type_registry(self): - self.assertItemsEqual([], PowerTypeRegistry) - PowerTypeRegistry.register_item("resource", sentinel.resource) - self.assertIn( - sentinel.resource, - (item for name, item in PowerTypeRegistry)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/diskless/__init__.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/diskless/__init__.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/diskless/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/diskless/__init__.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 
+1,113 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Base diskless driver.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "DisklessDriver", + "DisklessDriverError", + "DisklessDriverRegistry", + ] + +from abc import ( + ABCMeta, + abstractmethod, + abstractproperty, + ) + +from jsonschema import validate +from provisioningserver.drivers import ( + JSON_SETTING_SCHEMA, + validate_settings, + ) +from provisioningserver.utils.registry import Registry + + +JSON_DISKLESS_DRIVERS_SCHEMA = { + 'title': "Diskless drivers parameters set", + 'type': 'array', + 'items': JSON_SETTING_SCHEMA, +} + + +class DisklessDriverError: + """Error when driver fails to complete the needed task.""" + + +class DisklessDriver: + """Skeleton for a diskless driver.""" + + __metaclass__ = ABCMeta + + def __init__(self): + super(DisklessDriver, self).__init__() + validate_settings(self.get_schema()) + + @abstractproperty + def name(self): + """Name of the diskless driver.""" + + @abstractproperty + def description(self): + """Description of the diskless driver.""" + + @abstractproperty + def settings(self): + """List of settings for the driver. + + Each setting in this list can be changed by the user. They are passed + to the `create_disk` and `delete_disk` using the kwargs. It is up + to the driver to read these options before performing the operation. + """ + + @abstractmethod + def create_disk(self, system_id, source_path, **kwargs): + """Creates the disk for the `system_id` using the `source_path` as + the data to place on the disk initially. + + :param system_id: `Node.system_id` + :param source_path: Path to the source data + :param kwargs: Settings user set from `get_settings`. + :return: Path to the newly created disk. 
+ """ + + @abstractmethod + def delete_disk(self, system_id, disk_path, **kwargs): + """Deletes the disk for the `system_id`. + + :param system_id: `Node.system_id` + :param disk_path: Path returned by `create_disk`. + :param kwargs: Settings user set from `get_settings`. + """ + + def get_schema(self): + """Returns the JSON schema for the driver.""" + return dict( + name=self.name, description=self.description, + fields=self.settings) + + +class DisklessDriverRegistry(Registry): + """Registry for diskless drivers.""" + + @classmethod + def get_schema(cls): + """Returns the full schema for the registry.""" + schemas = [drivers.get_schema() for _, drivers in cls] + validate(schemas, JSON_DISKLESS_DRIVERS_SCHEMA) + return schemas + + +builtin_diskless_drivers = [ + ] +for driver in builtin_diskless_drivers: + DisklessDriverRegistry.register_item(driver.name, driver) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/diskless/tests/test_base.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/diskless/tests/test_base.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/diskless/tests/test_base.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/diskless/tests/test_base.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,171 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for `provisioningserver.drivers.diskless`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from mock import sentinel +from provisioningserver.drivers import ( + make_setting_field, + validate_settings, + ) +from provisioningserver.drivers.diskless import ( + DisklessDriver, + DisklessDriverRegistry, + ) +from provisioningserver.utils.testing import RegistryFixture + + +class FakeDisklessDriver(DisklessDriver): + + name = "" + description = "" + settings = [] + + def __init__(self, name, description, settings): + self.name = name + self.description = description + self.settings = settings + super(FakeDisklessDriver, self).__init__() + + def create_disk(self, system_id, source_path, **kwargs): + raise NotImplementedError() + + def delete_disk(self, system_id, disk_path, **kwargs): + raise NotImplementedError() + + +def make_diskless_driver(name=None, description=None, settings=None): + if name is None: + name = factory.make_name('diskless') + if description is None: + description = factory.make_name('description') + if settings is None: + settings = [] + return FakeDisklessDriver(name, description, settings) + + +class TestFakeDisklessDriver(MAASTestCase): + + def test_attributes(self): + fake_name = factory.make_name('name') + fake_description = factory.make_name('description') + fake_setting = factory.make_name('setting') + fake_settings = [ + make_setting_field( + fake_setting, fake_setting.title()), + ] + attributes = { + 'name': fake_name, + 'description': fake_description, + 'settings': fake_settings, + } + fake_driver = FakeDisklessDriver( + fake_name, fake_description, fake_settings) + self.assertAttributes(fake_driver, attributes) + + def test_make_diskless_driver(self): + fake_name = factory.make_name('name') + fake_description = factory.make_name('description') 
+ fake_setting = factory.make_name('setting') + fake_settings = [ + make_setting_field( + fake_setting, fake_setting.title()), + ] + attributes = { + 'name': fake_name, + 'description': fake_description, + 'settings': fake_settings, + } + fake_driver = make_diskless_driver( + name=fake_name, description=fake_description, + settings=fake_settings) + self.assertAttributes(fake_driver, attributes) + + def test_make_diskless_driver_makes_name_and_description(self): + fake_driver = make_diskless_driver() + self.assertNotEqual("", fake_driver.name) + self.assertNotEqual("", fake_driver.description) + + def test_create_disk_raises_not_implemented(self): + fake_driver = make_diskless_driver() + self.assertRaises( + NotImplementedError, + fake_driver.create_disk, sentinel.system_id, sentinel.source_path) + + def test_delete_disk_raises_not_implemented(self): + fake_driver = make_diskless_driver() + self.assertRaises( + NotImplementedError, + fake_driver.delete_disk, sentinel.system_id, sentinel.disk_path) + + +class TestDisklessDriver(MAASTestCase): + + def test_get_schema(self): + fake_name = factory.make_name('name') + fake_description = factory.make_name('description') + fake_setting = factory.make_name('setting') + fake_settings = [ + make_setting_field( + fake_setting, fake_setting.title()), + ] + fake_driver = make_diskless_driver() + self.assertItemsEqual({ + 'name': fake_name, + 'description': fake_description, + 'fields': fake_settings, + }, + fake_driver.get_schema()) + + def test_get_schema_returns_valid_schema(self): + fake_driver = make_diskless_driver() + #: doesn't raise ValidationError + validate_settings(fake_driver.get_schema()) + + +class TestDisklessDriverRegistry(MAASTestCase): + + def setUp(self): + super(TestDisklessDriverRegistry, self).setUp() + # Ensure the global registry is empty for each test run. 
+ self.useFixture(RegistryFixture()) + + def test_registry(self): + self.assertItemsEqual([], DisklessDriverRegistry) + DisklessDriverRegistry.register_item("driver", sentinel.driver) + self.assertIn( + sentinel.driver, + (item for name, item in DisklessDriverRegistry)) + + def test_get_schema(self): + fake_driver_one = make_diskless_driver() + fake_driver_two = make_diskless_driver() + DisklessDriverRegistry.register_item( + fake_driver_one.name, fake_driver_one) + DisklessDriverRegistry.register_item( + fake_driver_two.name, fake_driver_two) + self.assertItemsEqual([ + { + 'name': fake_driver_one.name, + 'description': fake_driver_one.description, + 'fields': [], + }, + { + 'name': fake_driver_two.name, + 'description': fake_driver_two.description, + 'fields': [], + }], + DisklessDriverRegistry.get_schema()) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/mscm.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/mscm.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/mscm.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/mscm.py 2015-07-10 01:27:14.000000000 +0000 @@ -18,6 +18,7 @@ __metaclass__ = type __all__ = [ 'power_control_mscm', + 'power_state_mscm', 'probe_and_enlist_mscm', ] @@ -27,7 +28,7 @@ AutoAddPolicy, SSHClient, ) -import provisioningserver.custom_hardware.utils as utils +import provisioningserver.utils as utils cartridge_mapping = { @@ -36,13 +37,23 @@ 'ProLiant m350 Server Cartridge': 'amd64/generic', 'ProLiant m400 Server Cartridge': 'arm64/xgene-uboot', 'ProLiant m500 Server Cartridge': 'amd64/generic', + 'ProLiant m700 Server Cartridge': 'amd64/generic', 'ProLiant m710 Server Cartridge': 'amd64/generic', 'ProLiant m800 Server Cartridge': 'armhf/keystone', - 'Default': 'arm64/generic', + 'Default': 'amd64/generic', } -class MSCM_CLI_API(object): +class MSCMState: + OFF = "Off" + ON = "On" + + +class MSCMError(Exception): + """Failure communicating 
to MSCM. """ + + +class MSCM_CLI_API: """An API for interacting with the Moonshot iLO CM CLI.""" def __init__(self, host, username, password): @@ -116,7 +127,7 @@ else: return cartridge_mapping['Default'] - def get_node_power_status(self, node_id): + def get_node_power_state(self, node_id): """Get power state of node (on/off). Example of stdout from running "show node power ": @@ -151,30 +162,49 @@ of 'mscm'. """ mscm = MSCM_CLI_API(host, username, password) - power_status = mscm.get_node_power_status(node_id) if power_change == 'off': mscm.power_node_off(node_id) - return + elif power_change == 'on': + if mscm.get_node_power_state(node_id) == MSCMState.ON: + mscm.power_node_off(node_id) + mscm.configure_node_bootonce_pxe(node_id) + mscm.power_node_on(node_id) + else: + raise MSCMError("Unexpected maas power mode.") - if power_change != 'on': - raise AssertionError('Unexpected maas power mode.') - if power_status == 'On': - mscm.power_node_off(node_id) - - mscm.configure_node_bootonce_pxe(node_id) - mscm.power_node_on(node_id) +def power_state_mscm(host, username, password, node_id): + """Return the power state for the mscm machine.""" + mscm = MSCM_CLI_API(host, username, password) + try: + power_state = mscm.get_node_power_state(node_id) + except: + raise MSCMError("Failed to retrieve power state.") + + if power_state == MSCMState.OFF: + return 'off' + elif power_state == MSCMState.ON: + return 'on' + raise MSCMError('Unknown power state: %s' % power_state) def probe_and_enlist_mscm(host, username, password): - """ Extracts all of nodes from mscm, sets all of them to boot via HDD by, + """ Extracts all of nodes from mscm, sets all of them to boot via M.2 by, default, sets them to bootonce via PXE, and then enlists them into MAAS. 
""" mscm = MSCM_CLI_API(host, username, password) - nodes = mscm.discover_nodes() + try: + # if discover_nodes works, we have access to the system + nodes = mscm.discover_nodes() + except: + raise MSCMError( + "Failed to probe nodes for mscm with host=%s, " + "username=%s, password=%s" + % (host, username, password)) + for node_id in nodes: - # Set default boot to HDD + # Set default boot to M.2 mscm.configure_node_boot_m2(node_id) params = { 'power_address': host, diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/seamicro.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/seamicro.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/seamicro.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/seamicro.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,341 @@ +# Copyright 2013-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'power_control_seamicro15k_v09', + 'power_control_seamicro15k_v2', + 'probe_seamicro15k_and_enlist', + ] + +import httplib +import json +import time +import urllib2 +import urlparse + +from provisioningserver.logger import get_maas_logger +from provisioningserver.utils import create_node +from provisioningserver.utils.url import compose_URL +from seamicroclient import exceptions as seamicro_exceptions +from seamicroclient.v2 import client as seamicro_client + + +maaslog = get_maas_logger("drivers.seamicro") + + +class POWER_STATUS: + ON = 'Power-On' + OFF = 'Power-Off' + RESET = 'Reset' + + +class SeaMicroError(Exception): + """Failure talking to a SeaMicro chassis controller. """ + pass + + +class SeaMicroAPIV09Error(SeaMicroError): + """Failure talking to a SeaMicro API v0.9. 
""" + + def __init__(self, msg, response_code=None): + super(SeaMicroAPIV09Error, self).__init__(msg) + self.response_code = response_code + + +class SeaMicroAPIV09: + allowed_codes = [httplib.OK, httplib.ACCEPTED, httplib.NOT_MODIFIED] + + def __init__(self, url): + """ + :param url: The URL of the seamicro chassis, e.g.: http://seamciro/v0.9 + :type url: string + """ + self.url = url + self.token = None + + def build_url(self, location, params=None): + """Builds an order-dependent url, as the SeaMicro chassis + requires order-dependent parameters. + """ + if params is None: + params = [] + params = filter(None, params) + return urlparse.urljoin(self.url, location) + '?' + '&'.join(params) + + def parse_response(self, url, response): + """Parses the HTTP response, checking for errors + from the SeaMicro chassis. + """ + if response.getcode() not in self.allowed_codes: + raise SeaMicroAPIV09Error( + "got response code %s" % response.getcode(), + response_code=response.getcode()) + text = response.read() + + # Decode the response, it should be json. If not + # handle that case and set json_data to None, so + # a SeaMicroAPIV09Error can be raised. + try: + json_data = json.loads(text) + except ValueError: + json_data = None + + if not json_data: + raise SeaMicroAPIV09Error( + 'No JSON data found from %s: got %s' % (url, text)) + json_rpc_code = int(json_data['error']['code']) + if json_rpc_code not in self.allowed_codes: + raise SeaMicroAPIV09Error( + 'Got JSON RPC error code %d: %s for %s' % ( + json_rpc_code, + httplib.responses.get(json_rpc_code, 'Unknown!'), + url), + response_code=json_rpc_code) + return json_data + + def get(self, location, params=None): + """Dispatch a GET request to a SeaMicro chassis. + + The seamicro box has order-dependent HTTP parameters, so we build + our own get URL, and use a list vs. a dict for data, as the order is + implicit. 
+ """ + url = self.build_url(location, params) + response = urllib2.urlopen(url) + json_data = self.parse_response(url, response) + + return json_data['result'] + + def put(self, location, params=None): + """Dispatch a PUT request to a SeaMicro chassis. + + The seamicro box has order-dependent HTTP parameters, so we build + our own get URL, and use a list vs. a dict for data, as the order is + implicit. + """ + opener = urllib2.build_opener(urllib2.HTTPHandler) + url = self.build_url(location, params) + request = urllib2.Request(url) + request.get_method = lambda: 'PUT' + request.add_header('content-type', 'text/json') + response = opener.open(request) + json_data = self.parse_response(url, response) + + return json_data['result'] + + def is_logged_in(self): + return self.token is not None + + def login(self, username, password): + if not self.is_logged_in(): + self.token = self.get("login", [username, password]) + + def logout(self): + if self.is_logged_in(): + self.get("logout") + self.token = None + + def servers_all(self): + return self.get("servers/all", [self.token]) + + def servers(self): + return self.get("servers", [self.token]) + + def server_index(self, server_id): + """API v0.9 uses arbitrary indexing, this function converts a server + id to an index that can be used for detailed outputs & commands. 
+ """ + servers = self.servers()['serverId'] + for idx, name in servers.items(): + if name == server_id: + return idx + return None + + def power_server(self, server_id, new_status, do_pxe=False, force=False): + idx = self.server_index(server_id) + if idx is None: + raise SeaMicroAPIV09Error( + 'Failed to retrieve server index, ' + 'invalid server_id: %s' % server_id) + + location = 'servers/%s' % idx + params = ['action=%s' % new_status] + if new_status in [POWER_STATUS.ON, POWER_STATUS.RESET]: + if do_pxe: + params.append("using-pxe=true") + else: + params.append("using-pxe=false") + elif new_status in [POWER_STATUS.OFF]: + if force: + params.append("force=true") + else: + params.append("force=false") + else: + raise SeaMicroAPIV09Error('Invalid power action: %s' % new_status) + + params.append(self.token) + self.put(location, params=params) + return True + + def power_on(self, server_id, do_pxe=False): + return self.power_server(server_id, POWER_STATUS.ON, do_pxe=do_pxe) + + def power_off(self, server_id, force=False): + return self.power_server(server_id, POWER_STATUS.OFF, force=force) + + def reset(self, server_id, do_pxe=False): + return self.power_server(server_id, POWER_STATUS.RESET, do_pxe=do_pxe) + + +def get_seamicro15k_api(version, ip, username, password): + """Gets the api client depending on the version. + Supports v0.9 and v2.0. 
+ + :return: api for version, None if version not supported + """ + if version == 'v0.9': + api = SeaMicroAPIV09(compose_URL('http:///v0.9/', ip)) + try: + api.login(username, password) + except urllib2.URLError: + # Cannot reach using v0.9, might not be supported + return None + return api + elif version == 'v2.0': + url = compose_URL('http:///v2.0', ip) + try: + api = seamicro_client.Client( + auth_url=url, username=username, password=password) + except seamicro_exceptions.ConnectionRefused: + # Cannot reach using v2.0, might no be supported + return None + return api + + +def get_seamicro15k_servers(version, ip, username, password): + """Gets a list of tuples containing (server_id, mac_address) from the + sm15k api version. Supports v0.9 and v2.0. + + :return: list of (server_id, mac_address), None if version not supported + """ + api = get_seamicro15k_api(version, ip, username, password) + if api: + if version == 'v0.9': + return ( + (server['serverId'].split('/')[0], server['serverMacAddr']) + for server in + api.servers_all().values() + # There are 8 network cards attached to these boxes, we only + # use NIC 0 for PXE booting. + if server['serverNIC'] == '0' + ) + elif version == 'v2.0': + servers = [] + for server in api.servers.list(): + id = server.id.split('/')[0] + macs = [nic['macAddr'] for nic in server.nic.values()] + servers.append((id, macs)) + return servers + return None + + +def select_seamicro15k_api_version(power_control): + """Returns the lastest api version to use.""" + if power_control == 'ipmi': + return ['v2.0', 'v0.9'] + if power_control == 'restapi': + return ['v0.9'] + if power_control == 'restapi2': + return ['v2.0'] + raise SeaMicroError( + 'Unsupported power control method: %s.' 
% power_control) + + +def find_seamicro15k_servers(ip, username, password, power_control): + """Returns the list of servers, using the latest supported api version.""" + api_versions = select_seamicro15k_api_version(power_control) + for version in api_versions: + servers = get_seamicro15k_servers(version, ip, username, password) + if servers is not None: + return servers + raise SeaMicroError('Failure to retrieve servers.') + + +def probe_seamicro15k_and_enlist(ip, username, password, power_control=None): + power_control = power_control or 'ipmi' + + maaslog.info( + "Probing for seamicro15k nodes with arguments " + "ip=%s, username=%s, password=%s, power_control=%s", + ip, username, password, power_control) + servers = find_seamicro15k_servers(ip, username, password, power_control) + for system_id, macs in servers: + params = { + 'power_address': ip, + 'power_user': username, + 'power_pass': password, + 'power_control': power_control, + 'system_id': system_id + } + maaslog.info( + "Found seamicro15k node with macs %s; adding to MAAS with " + "params : %s", macs, params) + create_node(macs, 'amd64', 'sm15k', params) + + +def power_control_seamicro15k_v09(ip, username, password, server_id, + power_change, retry_count=5, retry_wait=1): + server_id = '%s/0' % server_id + api = SeaMicroAPIV09(compose_URL('http:///v0.9/', ip)) + + while retry_count > 0: + api.login(username, password) + try: + if power_change == "on": + api.power_on(server_id, do_pxe=True) + elif power_change == "off": + api.power_off(server_id, force=True) + except SeaMicroAPIV09Error as e: + # Chance that multiple login's are at once, the api + # only supports one at a time. So lets try again after + # a second, up to max retry count. 
+ if e.response_code == 401: + retry_count -= 1 + time.sleep(retry_wait) + continue + else: + raise + break + + +def power_control_seamicro15k_v2(ip, username, password, server_id, + power_change): + server_id = '%s/0' % server_id + api = get_seamicro15k_api('v2.0', ip, username, password) + if api is None: + raise SeaMicroError('Unable to contact BMC controller.') + server = api.servers.get(server_id) + if power_change == "on": + server.power_on(using_pxe=True) + elif power_change == "off": + server.power_off(force=True) + + +def power_query_seamicro15k_v2(ip, username, password, server_id): + server_id = '%s/0' % server_id + api = get_seamicro15k_api('v2.0', ip, username, password) + if api is None: + raise SeaMicroError('Unable to contact BMC controller.') + server = api.servers.get(server_id) + if server.active: + return "on" + return "off" diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/tests/test_mscm.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/tests/test_mscm.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/tests/test_mscm.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/tests/test_mscm.py 2015-07-10 01:27:14.000000000 +0000 @@ -19,16 +19,24 @@ from StringIO import StringIO from maastesting.factory import factory -from maastesting.matchers import MockCalledOnceWith +from maastesting.matchers import ( + MockAnyCall, + MockCalledOnceWith, + MockCalledWith, + ) from maastesting.testcase import MAASTestCase from mock import Mock from provisioningserver.drivers.hardware.mscm import ( cartridge_mapping, MSCM_CLI_API, + MSCMError, + MSCMState, power_control_mscm, + power_state_mscm, probe_and_enlist_mscm, ) -import provisioningserver.custom_hardware.utils as utils +import provisioningserver.utils as utils +from testtools.matchers import Equals def make_mscm_api(): @@ -52,14 +60,23 @@ def make_show_node_macaddr(length=10): """Make a fake return value for 
get_node_macaddr.""" - return ''.join((factory.getRandomMACAddress() + ' ') + return ''.join((factory.make_mac_address() + ' ') for _ in xrange(length)) -class TestRunCliCommand(MAASTestCase): - """Tests for ``MSCM_CLI_API.run_cli_command``.""" +class TestMSCMCliApi(MAASTestCase): + """Tests for `MSCM_CLI_API`.""" + + scenarios = [ + ('power_node_on', + dict(method='power_node_on')), + ('power_node_off', + dict(method='power_node_off')), + ('configure_node_bootonce_pxe', + dict(method='configure_node_bootonce_pxe')), + ] - def test_returns_output(self): + def test_run_cli_command_returns_output(self): api = make_mscm_api() ssh_mock = self.patch(api, '_ssh') expected = factory.make_name('output') @@ -69,18 +86,18 @@ output = api._run_cli_command(factory.make_name('command')) self.assertEqual(expected, output) - def test_connects_and_closes_ssh_client(self): + def test_run_cli_command_connects_and_closes_ssh_client(self): api = make_mscm_api() ssh_mock = self.patch(api, '_ssh') ssh_mock.exec_command = Mock(return_value=factory.make_streams()) api._run_cli_command(factory.make_name('command')) - self.assertThat( + self.expectThat( ssh_mock.connect, MockCalledOnceWith( api.host, username=api.username, password=api.password)) - self.assertThat(ssh_mock.close, MockCalledOnceWith()) + self.expectThat(ssh_mock.close, MockCalledOnceWith()) - def test_closes_when_exception_raised(self): + def test_run_cli_command_closes_when_exception_raised(self): api = make_mscm_api() ssh_mock = self.patch(api, '_ssh') @@ -90,11 +107,7 @@ ssh_mock.exec_command = Mock(side_effect=fail) command = factory.make_name('command') self.assertRaises(Exception, api._run_cli_command, command) - self.assertThat(ssh_mock.close, MockCalledOnceWith()) - - -class TestDiscoverNodes(MAASTestCase): - """Tests for ``MSCM_CLI_API.discover_nodes``.""" + self.expectThat(ssh_mock.close, MockCalledOnceWith()) def test_discover_nodes(self): api = make_mscm_api() @@ -106,10 +119,6 @@ output = api.discover_nodes() 
self.assertEqual(expected, output) - -class TestNodeMACAddress(MAASTestCase): - """Tests for ``MSCM_CLI_API.get_node_macaddr``.""" - def test_get_node_macaddr(self): api = make_mscm_api() expected = make_show_node_macaddr() @@ -120,10 +129,6 @@ self.assertEqual(re.findall(r':'.join(['[0-9a-f]{2}'] * 6), expected), output) - -class TestNodeArch(MAASTestCase): - """Tests for ``MSCM_CLI_API.get_node_arch``.""" - def test_get_node_arch(self): api = make_mscm_api() expected = '\r\n Product Name: ProLiant Moonshot Cartridge\r\n' @@ -134,36 +139,17 @@ key = expected.split('Product Name: ')[1].splitlines()[0] self.assertEqual(cartridge_mapping[key], output) - -class TestGetNodePowerStatus(MAASTestCase): - """Tests for ``MSCM_CLI_API.get_node_power_status``.""" - - def test_get_node_power_status(self): + def test_get_node_power_state(self): api = make_mscm_api() expected = '\r\n Node #1\r\n Power State: On\r\n' cli_mock = self.patch(api, '_run_cli_command') cli_mock.return_value = expected node_id = make_node_id() - output = api.get_node_power_status(node_id) + output = api.get_node_power_state(node_id) self.assertEqual(expected.split('Power State: ')[1].splitlines()[0], output) - -class TestPowerAndConfigureNode(MAASTestCase): - """Tests for ``MSCM_CLI_API.configure_node_bootonce_pxe, - MSCM_CLI_API.power_node_on, and MSCM_CLI_API.power_node_off``. 
- """ - - scenarios = [ - ('power_node_on()', - dict(method='power_node_on')), - ('power_node_off()', - dict(method='power_node_off')), - ('configure_node_bootonce_pxe()', - dict(method='configure_node_bootonce_pxe')), - ] - - def test_returns_expected_outout(self): + def test_power_and_configure_node_returns_expected_outout(self): api = make_mscm_api() ssh_mock = self.patch(api, '_ssh') expected = factory.make_name('output') @@ -174,86 +160,152 @@ self.assertEqual(expected, output) -class TestPowerControlMSCM(MAASTestCase): - """Tests for ``power_control_ucsm``.""" +class TestMSCMProbeAndEnlist(MAASTestCase): + """Tests for `probe_and_enlist_mscm`.""" - def test_power_control_mscm_on_on(self): - # power_change and power_status are both 'on' + def test_probe_and_enlist(self): host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') node_id = make_node_id() + macs = make_show_node_macaddr(4) + arch = 'arm64/xgene-uboot' + discover_nodes_mock = self.patch(MSCM_CLI_API, 'discover_nodes') + discover_nodes_mock.return_value = [node_id] + boot_m2_mock = self.patch(MSCM_CLI_API, 'configure_node_boot_m2') + node_arch_mock = self.patch(MSCM_CLI_API, 'get_node_arch') + node_arch_mock.return_value = arch + node_macs_mock = self.patch(MSCM_CLI_API, 'get_node_macaddr') + node_macs_mock.return_value = macs + create_node_mock = self.patch(utils, 'create_node') + params = { + 'power_address': host, + 'power_user': username, + 'power_pass': password, + 'node_id': node_id, + } + + probe_and_enlist_mscm(host, username, password) + self.expectThat(discover_nodes_mock, MockAnyCall()) + self.expectThat(boot_m2_mock, MockCalledWith(node_id)) + self.expectThat(node_arch_mock, MockCalledOnceWith(node_id)) + self.expectThat(node_macs_mock, MockCalledOnceWith(node_id)) + self.expectThat(create_node_mock, + MockCalledOnceWith(macs, arch, 'mscm', params)) + + def test_probe_and_enlist_discover_nodes_failure(self): + host = 
factory.make_hostname('mscm') + username = factory.make_name('user') + password = factory.make_name('password') + discover_nodes_mock = self.patch(MSCM_CLI_API, 'discover_nodes') + discover_nodes_mock.side_effect = MSCMError('error') + self.assertRaises( + MSCMError, probe_and_enlist_mscm, host, username, password) + + +class TestMSCMPowerControl(MAASTestCase): + """Tests for `power_control_mscm`.""" + + def test_power_control_error_on_unknown_power_change(self): + host = factory.make_hostname('mscm') + username = factory.make_name('user') + password = factory.make_name('password') + node_id = make_node_id() + power_change = factory.make_name('error') + self.assertRaises( + MSCMError, power_control_mscm, host, + username, password, node_id, power_change) + + def test_power_control_power_change_on_power_state_on(self): + # power_change and current power_state are both 'on' + host = factory.make_hostname('mscm') + username = factory.make_name('user') + password = factory.make_name('password') + node_id = make_node_id() + power_state_mock = self.patch(MSCM_CLI_API, 'get_node_power_state') + power_state_mock.return_value = MSCMState.ON + power_node_off_mock = self.patch(MSCM_CLI_API, 'power_node_off') bootonce_mock = self.patch(MSCM_CLI_API, 'configure_node_bootonce_pxe') - power_status_mock = self.patch(MSCM_CLI_API, 'get_node_power_status') - power_status_mock.return_value = 'On' power_node_on_mock = self.patch(MSCM_CLI_API, 'power_node_on') - power_node_off_mock = self.patch(MSCM_CLI_API, 'power_node_off') power_control_mscm(host, username, password, node_id, power_change='on') - self.assertThat(bootonce_mock, MockCalledOnceWith(node_id)) - self.assertThat(power_node_off_mock, MockCalledOnceWith(node_id)) - self.assertThat(power_node_on_mock, MockCalledOnceWith(node_id)) + self.expectThat(power_state_mock, MockCalledOnceWith(node_id)) + self.expectThat(power_node_off_mock, MockCalledOnceWith(node_id)) + self.expectThat(bootonce_mock, MockCalledOnceWith(node_id)) + 
self.expectThat(power_node_on_mock, MockCalledOnceWith(node_id)) - def test_power_control_mscm_on_off(self): - # power_change is 'on' and power_status is 'off' + def test_power_control_power_change_on_power_state_off(self): + # power_change is 'on' and current power_state is 'off' host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') node_id = make_node_id() + power_state_mock = self.patch(MSCM_CLI_API, 'get_node_power_state') + power_state_mock.return_value = MSCMState.OFF bootonce_mock = self.patch(MSCM_CLI_API, 'configure_node_bootonce_pxe') - power_status_mock = self.patch(MSCM_CLI_API, 'get_node_power_status') - power_status_mock.return_value = 'Off' power_node_on_mock = self.patch(MSCM_CLI_API, 'power_node_on') power_control_mscm(host, username, password, node_id, power_change='on') - self.assertThat(bootonce_mock, MockCalledOnceWith(node_id)) - self.assertThat(power_node_on_mock, MockCalledOnceWith(node_id)) + self.expectThat(power_state_mock, MockCalledOnceWith(node_id)) + self.expectThat(bootonce_mock, MockCalledOnceWith(node_id)) + self.expectThat(power_node_on_mock, MockCalledOnceWith(node_id)) - def test_power_control_mscm_off_on(self): - # power_change is 'off' and power_status is 'on' + def test_power_control_power_change_off_power_state_on(self): + # power_change is 'off' and current power_state is 'on' host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') node_id = make_node_id() - power_status_mock = self.patch(MSCM_CLI_API, 'get_node_power_status') - power_status_mock.return_value = 'On' power_node_off_mock = self.patch(MSCM_CLI_API, 'power_node_off') power_control_mscm(host, username, password, node_id, power_change='off') - self.assertThat(power_node_off_mock, MockCalledOnceWith(node_id)) + self.expectThat(power_node_off_mock, MockCalledOnceWith(node_id)) -class TestProbeAndEnlistMSCM(MAASTestCase): - """Tests for 
``probe_and_enlist_mscm``.""" +class TestMSCMPowerState(MAASTestCase): + """Tests for `power_state_mscm`.""" - def test_probe_and_enlist(self): + def test_power_state_failed_to_get_state(self): host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') node_id = make_node_id() - macs = make_show_node_macaddr(4) - arch = 'arm64/xgene-uboot' - discover_nodes_mock = self.patch(MSCM_CLI_API, 'discover_nodes') - discover_nodes_mock.return_value = [node_id] - boot_m2_mock = self.patch(MSCM_CLI_API, 'configure_node_boot_m2') - node_arch_mock = self.patch(MSCM_CLI_API, 'get_node_arch') - node_arch_mock.return_value = arch - node_macs_mock = self.patch(MSCM_CLI_API, 'get_node_macaddr') - node_macs_mock.return_value = macs - create_node_mock = self.patch(utils, 'create_node') - probe_and_enlist_mscm(host, username, password) - self.assertThat(discover_nodes_mock, MockCalledOnceWith()) - self.assertThat(boot_m2_mock, MockCalledOnceWith(node_id)) - self.assertThat(node_arch_mock, MockCalledOnceWith(node_id)) - self.assertThat(node_macs_mock, MockCalledOnceWith(node_id)) - params = { - 'power_address': host, - 'power_user': username, - 'power_pass': password, - 'node_id': node_id, - } - self.assertThat(create_node_mock, - MockCalledOnceWith(macs, arch, 'mscm', params)) + power_state_mock = self.patch(MSCM_CLI_API, 'get_node_power_state') + power_state_mock.side_effect = MSCMError('error') + self.assertRaises( + MSCMError, power_state_mscm, host, username, password, node_id) + + def test_power_state_get_off(self): + host = factory.make_hostname('mscm') + username = factory.make_name('user') + password = factory.make_name('password') + node_id = make_node_id() + power_state_mock = self.patch(MSCM_CLI_API, 'get_node_power_state') + power_state_mock.return_value = MSCMState.OFF + self.assertThat( + power_state_mscm(host, username, password, node_id), + Equals('off')) + + def test_power_state_get_on(self): + host = 
factory.make_hostname('mscm') + username = factory.make_name('user') + password = factory.make_name('password') + node_id = make_node_id() + power_state_mock = self.patch(MSCM_CLI_API, 'get_node_power_state') + power_state_mock.return_value = MSCMState.ON + self.assertThat( + power_state_mscm(host, username, password, node_id), + Equals('on')) + + def test_power_state_error_on_unknown_state(self): + host = factory.make_hostname('mscm') + username = factory.make_name('user') + password = factory.make_name('password') + node_id = make_node_id() + power_state_mock = self.patch(MSCM_CLI_API, 'get_node_power_state') + power_state_mock.return_value = factory.make_name('error') + self.assertRaises( + MSCMError, power_state_mscm, host, username, password, node_id) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/tests/test_seamicro.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/tests/test_seamicro.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/tests/test_seamicro.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/tests/test_seamicro.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,532 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `provisioningserver.drivers.hardware.seamicro`. 
+""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import json +import urlparse + +from maastesting.factory import factory +from maastesting.matchers import ( + MockCalledOnceWith, + MockCalledWith, + MockCallsMatch, + ) +from maastesting.testcase import MAASTestCase +from mock import ( + call, + Mock, + ) +from provisioningserver.drivers.hardware import seamicro +from provisioningserver.drivers.hardware.seamicro import ( + find_seamicro15k_servers, + power_control_seamicro15k_v09, + power_control_seamicro15k_v2, + power_query_seamicro15k_v2, + POWER_STATUS, + probe_seamicro15k_and_enlist, + SeaMicroAPIV09, + SeaMicroAPIV09Error, + SeaMicroError, + select_seamicro15k_api_version, + ) + + +class FakeResponse: + + def __init__(self, response_code, response, is_json=False): + self.response_code = response_code + self.response = response + if is_json: + self.response = json.dumps(response) + + def getcode(self): + return self.response_code + + def read(self): + return self.response + + +class FakeServer(object): + + def __init__(self, id): + self.id = id + self.nic = {} + + def add_fake_nic(self, id): + self.nic[id] = {'macAddr': factory.make_mac_address()} + + def get_fake_macs(self): + return [nic['macAddr'] for nic in self.nic.values()] + + +class FakeSeaMicroServerManager(object): + + def __init__(self): + self.servers = [] + + def get(self, server_id): + for server in self.servers: + if server_id == server.id: + return server + return None + + def list(self): + return self.servers + + +class FakeSeaMicroClient(object): + pass + + +class TestSeaMicroAPIV09(MAASTestCase): + """Tests for SeaMicroAPIV09.""" + + def test_build_url(self): + url = factory.make_string() + api = SeaMicroAPIV09('http://%s/' % url) + location = factory.make_string() + params = [factory.make_string() for _ in range(3)] + output = api.build_url(location, params) + parsed = 
urlparse.urlparse(output) + self.assertEqual(url, parsed.netloc) + self.assertEqual(location, parsed.path.split('/')[1]) + self.assertEqual(params, parsed.query.split('&')) + + def test_invalid_reponse_code(self): + url = 'http://%s/' % factory.make_string() + api = SeaMicroAPIV09(url) + response = FakeResponse(401, 'Unauthorized') + self.assertRaises( + SeaMicroAPIV09Error, api.parse_response, + url, response) + + def test_invalid_json_response(self): + url = 'http://%s/' % factory.make_string() + api = SeaMicroAPIV09(url) + response = FakeResponse(200, factory.make_string()) + self.assertRaises( + SeaMicroAPIV09Error, api.parse_response, + url, response) + + def test_json_error_response(self): + url = 'http://%s/' % factory.make_string() + api = SeaMicroAPIV09(url) + data = { + 'error': { + 'code': 401 + } + } + response = FakeResponse(200, data, is_json=True) + self.assertRaises( + SeaMicroAPIV09Error, api.parse_response, + url, response) + + def test_json_valid_response(self): + url = 'http://%s/' % factory.make_string() + api = SeaMicroAPIV09(url) + output = factory.make_string() + data = { + 'error': { + 'code': 200 + }, + 'result': { + 'data': output + }, + } + response = FakeResponse(200, data, is_json=True) + result = api.parse_response(url, response) + self.assertEqual(output, result['result']['data']) + + def configure_get_result(self, result=None): + self.patch( + SeaMicroAPIV09, 'get', + Mock(return_value=result)) + + def test_login_and_logout(self): + token = factory.make_string() + self.configure_get_result(token) + url = 'http://%s/' % factory.make_string() + api = SeaMicroAPIV09(url) + api.login('username', 'password') + self.assertEqual(token, api.token) + api.logout() + self.assertIsNone(api.token) + + def test_get_server_index(self): + result = { + 'serverId': { + 0: '0/0', + 1: '1/0', + 2: '2/0', + } + } + self.configure_get_result(result) + url = 'http://%s/' % factory.make_string() + api = SeaMicroAPIV09(url) + self.assertEqual(0, 
api.server_index('0/0')) + self.assertEqual(1, api.server_index('1/0')) + self.assertEqual(2, api.server_index('2/0')) + self.assertIsNone(api.server_index('3/0')) + + def configure_put_server_power(self, token=None): + result = { + 'serverId': { + 0: '0/0', + } + } + self.configure_get_result(result) + mock = self.patch( + SeaMicroAPIV09, + 'put') + url = 'http://%s/' % factory.make_string() + api = SeaMicroAPIV09(url) + api.token = token + return mock, api + + def assert_put_power_called(self, mock, idx, new_status, *params): + location = 'servers/%d' % idx + params = ['action=%s' % new_status] + list(params) + self.assertThat(mock, MockCalledOnceWith(location, params=params)) + + def test_put_server_power_on_using_pxe(self): + token = factory.make_string() + mock, api = self.configure_put_server_power(token) + api.power_on('0/0', do_pxe=True) + self.assert_put_power_called( + mock, 0, POWER_STATUS.ON, 'using-pxe=true', token) + + def test_put_server_power_on_not_using_pxe(self): + token = factory.make_string() + mock, api = self.configure_put_server_power(token) + api.power_on('0/0', do_pxe=False) + self.assert_put_power_called( + mock, 0, POWER_STATUS.ON, 'using-pxe=false', token) + + def test_put_server_power_reset_using_pxe(self): + token = factory.make_string() + mock, api = self.configure_put_server_power(token) + api.reset('0/0', do_pxe=True) + self.assert_put_power_called( + mock, 0, POWER_STATUS.RESET, 'using-pxe=true', token) + + def test_put_server_power_reset_not_using_pxe(self): + token = factory.make_string() + mock, api = self.configure_put_server_power(token) + api.reset('0/0', do_pxe=False) + self.assert_put_power_called( + mock, 0, POWER_STATUS.RESET, 'using-pxe=false', token) + + def test_put_server_power_off(self): + token = factory.make_string() + mock, api = self.configure_put_server_power(token) + api.power_off('0/0', force=False) + self.assert_put_power_called( + mock, 0, POWER_STATUS.OFF, 'force=false', token) + + def 
test_put_server_power_off_force(self): + token = factory.make_string() + mock, api = self.configure_put_server_power(token) + api.power_off('0/0', force=True) + self.assert_put_power_called( + mock, 0, POWER_STATUS.OFF, 'force=true', token) + + +class TestSeaMicro(MAASTestCase): + """Tests for SeaMicro custom hardware.""" + + def test_select_seamicro15k_api_version_ipmi(self): + versions = select_seamicro15k_api_version('ipmi') + self.assertEqual(['v2.0', 'v0.9'], versions) + + def test_select_seamicro15k_api_version_restapi(self): + versions = select_seamicro15k_api_version('restapi') + self.assertEqual(['v0.9'], versions) + + def test_select_seamicro15k_api_version_restapi2(self): + versions = select_seamicro15k_api_version('restapi2') + self.assertEqual(['v2.0'], versions) + + def configure_get_seamicro15k_api(self, return_value=None): + ip = factory.make_ipv4_address() + username = factory.make_string() + password = factory.make_string() + mock = self.patch( + seamicro, + 'get_seamicro15k_api') + mock.return_value = return_value + return mock, ip, username, password + + def test_find_seamicro15k_servers_impi(self): + mock, ip, username, password = self.configure_get_seamicro15k_api() + self.assertRaises( + SeaMicroError, find_seamicro15k_servers, ip, username, + password, 'ipmi') + self.assertThat( + mock, + MockCallsMatch( + call('v2.0', ip, username, password), + call('v0.9', ip, username, password))) + + def test_find_seamicro15k_servers_restapi(self): + mock, ip, username, password = self.configure_get_seamicro15k_api() + self.assertRaises( + SeaMicroError, find_seamicro15k_servers, ip, username, + password, 'restapi') + self.assertThat( + mock, MockCalledOnceWith('v0.9', ip, username, password)) + + def test_find_seamicro15k_servers_restapi2(self): + mock, ip, username, password = self.configure_get_seamicro15k_api() + self.assertRaises( + SeaMicroError, find_seamicro15k_servers, ip, username, + password, 'restapi2') + self.assertThat( + mock, 
MockCalledOnceWith('v2.0', ip, username, password)) + + def configure_api_v09_login(self, token=None): + token = token or factory.make_string() + mock = self.patch( + SeaMicroAPIV09, + 'login') + mock.return_value = token + return mock + + def test_probe_seamicro15k_and_enlist_v09(self): + self.configure_api_v09_login() + ip = factory.make_ipv4_address() + username = factory.make_string() + password = factory.make_string() + result = { + 0: { + 'serverId': '0/0', + 'serverNIC': '0', + 'serverMacAddr': factory.make_mac_address(), + }, + 1: { + 'serverId': '1/0', + 'serverNIC': '0', + 'serverMacAddr': factory.make_mac_address(), + }, + 2: { + 'serverId': '2/0', + 'serverNIC': '0', + 'serverMacAddr': factory.make_mac_address(), + }, + 3: { + 'serverId': '3/1', + 'serverNIC': '1', + 'serverMacAddr': factory.make_mac_address(), + }, + } + self.patch( + SeaMicroAPIV09, 'get', + Mock(return_value=result)) + mock_create_node = self.patch(seamicro, 'create_node') + + probe_seamicro15k_and_enlist( + ip, username, password, power_control='restapi') + self.assertEqual(3, mock_create_node.call_count) + + last = result[2] + power_params = { + 'power_control': 'restapi', + 'system_id': last['serverId'].split('/')[0], + 'power_address': ip, + 'power_pass': password, + 'power_user': username + } + self.assertThat( + mock_create_node, + MockCalledWith( + last['serverMacAddr'], 'amd64', + 'sm15k', power_params)) + + def test_power_control_seamicro15k_v09(self): + self.configure_api_v09_login() + ip = factory.make_ipv4_address() + username = factory.make_string() + password = factory.make_string() + mock = self.patch( + SeaMicroAPIV09, + 'power_server') + + power_control_seamicro15k_v09(ip, username, password, '25', 'on') + self.assertThat( + mock, + MockCalledOnceWith('25/0', POWER_STATUS.ON, do_pxe=True)) + + def test_power_control_seamicro15k_v09_retry_failure(self): + self.configure_api_v09_login() + ip = factory.make_ipv4_address() + username = factory.make_string() + password = 
factory.make_string() + mock = self.patch( + SeaMicroAPIV09, + 'power_server') + mock.side_effect = SeaMicroAPIV09Error("mock error", response_code=401) + + power_control_seamicro15k_v09( + ip, username, password, '25', 'on', + retry_count=5, retry_wait=0) + self.assertEqual(5, mock.call_count) + + def test_power_control_seamicro15k_v09_exception_failure(self): + self.configure_api_v09_login() + ip = factory.make_ipv4_address() + username = factory.make_string() + password = factory.make_string() + mock = self.patch( + SeaMicroAPIV09, + 'power_server') + mock.side_effect = SeaMicroAPIV09Error("mock error") + + self.assertRaises( + SeaMicroAPIV09Error, power_control_seamicro15k_v09, + ip, username, password, '25', 'on') + + def test_probe_seamicro15k_and_enlist_v2(self): + ip = factory.make_ipv4_address() + username = factory.make_string() + password = factory.make_string() + + fake_server_0 = FakeServer('0/0') + fake_server_0.add_fake_nic('0') + fake_server_0.add_fake_nic('1') + fake_server_1 = FakeServer('1/0') + fake_server_1.add_fake_nic('0') + fake_server_1.add_fake_nic('1') + fake_client = FakeSeaMicroClient() + fake_client.servers = FakeSeaMicroServerManager() + fake_client.servers.servers.append(fake_server_0) + fake_client.servers.servers.append(fake_server_1) + mock_get_api = self.patch( + seamicro, + 'get_seamicro15k_api') + mock_get_api.return_value = fake_client + mock_create_node = self.patch(seamicro, 'create_node') + + probe_seamicro15k_and_enlist( + ip, username, password, power_control='restapi2') + self.assertEqual(2, mock_create_node.call_count) + + self.assertThat( + mock_create_node, + MockCallsMatch( + call( + fake_server_0.get_fake_macs(), 'amd64', 'sm15k', + { + 'power_control': 'restapi2', + 'system_id': '0', + 'power_address': ip, + 'power_pass': password, + 'power_user': username + }), + call( + fake_server_1.get_fake_macs(), 'amd64', 'sm15k', + { + 'power_control': 'restapi2', + 'system_id': '1', + 'power_address': ip, + 'power_pass': 
password, + 'power_user': username + }))) + + def test_power_control_seamicro15k_v2(self): + ip = factory.make_ipv4_address() + username = factory.make_string() + password = factory.make_string() + + fake_server = FakeServer('0/0') + fake_client = FakeSeaMicroClient() + fake_client.servers = FakeSeaMicroServerManager() + fake_client.servers.servers.append(fake_server) + mock_power_on = self.patch(fake_server, 'power_on') + + mock_get_api = self.patch( + seamicro, + 'get_seamicro15k_api') + mock_get_api.return_value = fake_client + + power_control_seamicro15k_v2(ip, username, password, '0', 'on') + mock_power_on.assert_called() + + def test_power_control_seamicro15k_v2_raises_error_when_api_None(self): + ip = factory.make_ipv4_address() + username = factory.make_string() + password = factory.make_string() + + mock_get_api = self.patch( + seamicro, + 'get_seamicro15k_api') + mock_get_api.return_value = None + + self.assertRaises( + SeaMicroError, + power_control_seamicro15k_v2, ip, username, password, '0', 'on') + + def test_power_query_seamicro15k_v2_power_on(self): + ip = factory.make_ipv4_address() + username = factory.make_string() + password = factory.make_string() + + fake_server = FakeServer('0/0') + self.patch(fake_server, 'active', True) + fake_client = FakeSeaMicroClient() + fake_client.servers = FakeSeaMicroServerManager() + fake_client.servers.servers.append(fake_server) + + mock_get_api = self.patch( + seamicro, + 'get_seamicro15k_api') + mock_get_api.return_value = fake_client + + self.assertEqual( + "on", + power_query_seamicro15k_v2(ip, username, password, '0')) + + def test_power_query_seamicro15k_v2_power_off(self): + ip = factory.make_ipv4_address() + username = factory.make_string() + password = factory.make_string() + + fake_server = FakeServer('0/0') + self.patch(fake_server, 'active', False) + fake_client = FakeSeaMicroClient() + fake_client.servers = FakeSeaMicroServerManager() + fake_client.servers.servers.append(fake_server) + + mock_get_api 
= self.patch( + seamicro, + 'get_seamicro15k_api') + mock_get_api.return_value = fake_client + + self.assertEqual( + "off", + power_query_seamicro15k_v2(ip, username, password, '0')) + + def test_power_query_seamicro15k_v2_raises_error_when_api_None(self): + ip = factory.make_ipv4_address() + username = factory.make_string() + password = factory.make_string() + + mock_get_api = self.patch( + seamicro, + 'get_seamicro15k_api') + mock_get_api.return_value = None + + self.assertRaises( + SeaMicroError, + power_query_seamicro15k_v2, ip, username, password, '0') diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/tests/test_ucsm.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/tests/test_ucsm.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/tests/test_ucsm.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/tests/test_ucsm.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,717 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for ``provisioningserver.drivers.hardware.ucsm``.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from itertools import permutations +import random +from StringIO import StringIO +import urllib2 + +from lxml.etree import ( + Element, + SubElement, + XML, + ) +from maastesting.factory import factory +from maastesting.matchers import ( + MockCalledOnceWith, + MockCallsMatch, + MockNotCalled, + ) +from maastesting.testcase import MAASTestCase +from mock import ( + ANY, + call, + Mock, + ) +from provisioningserver.drivers.hardware import ucsm +from provisioningserver.drivers.hardware.ucsm import ( + get_children, + get_first_booter, + get_macs, + get_power_command, + get_server_power_control, + get_servers, + get_service_profile, + logged_in, + make_policy_change, + make_request_data, + parse_response, + power_control_ucsm, + power_state_ucsm, + probe_and_enlist_ucsm, + probe_servers, + RO_KEYS, + set_lan_boot_default, + set_server_power_control, + strip_ro_keys, + UCSM_XML_API, + UCSM_XML_API_Error, + ) +import provisioningserver.utils as utils +from testtools.matchers import Equals + + +def make_api(url='http://url', user='u', password='p', + cookie='foo', mock_call=True): + api = UCSM_XML_API(url, user, password) + api.cookie = cookie + return api + + +def make_api_patch_call(testcase, *args, **kwargs): + api = make_api(*args, **kwargs) + mock = testcase.patch(api, '_call') + return api, mock + + +def make_fake_result(root_class, child_tag, container='outConfigs'): + fake_result = Element(root_class) + outConfigs = SubElement(fake_result, container) + outConfigs.append(Element(child_tag)) + return outConfigs + + +def make_class(): + return factory.make_name('class') + + +def make_dn(): + return factory.make_name('dn') + + +def make_server(): + return factory.make_name('server') + + +class TestUCSMXMLAPIError(MAASTestCase): + """Tests for 
``UCSM_XML_API_Error``.""" + + def test_includes_code_and_msg(self): + def raise_error(): + raise UCSM_XML_API_Error('bad', 4224) + + error = self.assertRaises(UCSM_XML_API_Error, raise_error) + + self.assertEqual('bad', error.args[0]) + self.assertEqual(4224, error.code) + + +class TestMakeRequestData(MAASTestCase): + """Tests for ``make_request_data``.""" + + def test_no_children(self): + fields = {'hello': 'there'} + request_data = make_request_data('foo', fields) + root = XML(request_data) + self.assertEqual('foo', root.tag) + self.assertEqual('there', root.get('hello')) + + def test_with_children(self): + fields = {'hello': 'there'} + children_tags = ['bar', 'baz'] + children = [Element(child_tag) for child_tag in children_tags] + request_data = make_request_data('foo', fields, children) + root = XML(request_data) + self.assertEqual('foo', root.tag) + self.assertItemsEqual(children_tags, (e.tag for e in root)) + + def test_no_fields(self): + request_data = make_request_data('foo') + root = XML(request_data) + self.assertEqual('foo', root.tag) + + +class TestParseResonse(MAASTestCase): + """Tests for ``parse_response``.""" + + def test_no_error(self): + xml = '' + response = parse_response(xml) + self.assertEqual('foo', response.tag) + + def test_error(self): + xml = '' + self.assertRaises(UCSM_XML_API_Error, parse_response, xml) + + +class TestLogin(MAASTestCase): + """"Tests for ``UCSM_XML_API.login``.""" + + def test_login_assigns_cookie(self): + cookie = 'chocolate chip' + api, mock = make_api_patch_call(self) + mock.return_value = Element('aaaLogin', {'outCookie': cookie}) + api.login() + self.assertEqual(cookie, api.cookie) + + def test_login_call_parameters(self): + user = 'user' + password = 'pass' + api, mock = make_api_patch_call(self, user=user, password=password) + api.login() + fields = {'inName': user, 'inPassword': password} + self.assertThat(mock, MockCalledOnceWith('aaaLogin', fields)) + + +class TestLogout(MAASTestCase): + """"Tests for 
``UCSM_XML_API.logout``.""" + + def test_logout_clears_cookie(self): + api = make_api() + self.patch(api, '_call') + api.logout() + self.assertIsNone(api.cookie) + + def test_logout_uses_cookie(self): + api, mock = make_api_patch_call(self) + cookie = api.cookie + api.logout() + fields = {'inCookie': cookie} + self.assertThat(mock, MockCalledOnceWith('aaaLogout', fields)) + + +class TestConfigResolveClass(MAASTestCase): + """"Tests for ``UCSM_XML_API.config_resolve_class``.""" + + def test_no_filters(self): + class_id = make_class() + api, mock = make_api_patch_call(self) + api.config_resolve_class(class_id) + fields = {'cookie': api.cookie, 'classId': class_id} + self.assertThat(mock, MockCalledOnceWith('configResolveClass', fields, + ANY)) + + def test_with_filters(self): + class_id = make_class() + filter_element = Element('hi') + api, mock = make_api_patch_call(self) + api.config_resolve_class(class_id, [filter_element]) + in_filters = mock.call_args[0][2] + self.assertEqual([filter_element], in_filters[0][:]) + + def test_return_response(self): + api, mock = make_api_patch_call(self) + mock.return_value = Element('test') + result = api.config_resolve_class('c') + self.assertEqual(mock.return_value, result) + + +class TestConfigResolveChildren(MAASTestCase): + """"Tests for ``UCSM_XML_API.config_resolve_children``.""" + + def test_parameters(self): + dn = make_dn() + class_id = make_class() + api, mock = make_api_patch_call(self) + api.config_resolve_children(dn, class_id) + fields = {'inDn': dn, 'classId': class_id, 'cookie': api.cookie} + self.assertThat(mock, + MockCalledOnceWith('configResolveChildren', fields)) + + def test_no_class_id(self): + dn = make_dn() + api, mock = make_api_patch_call(self) + api.config_resolve_children(dn) + fields = {'inDn': dn, 'cookie': api.cookie} + self.assertThat(mock, + MockCalledOnceWith('configResolveChildren', fields)) + + def test_return_response(self): + api, mock = make_api_patch_call(self) + mock.return_value = 
Element('test') + result = api.config_resolve_children('d', 'c') + self.assertEqual(mock.return_value, result) + + +class TestConfigConfMo(MAASTestCase): + """"Tests for ``UCSM_XML_API.config_conf_mo``.""" + + def test_parameters(self): + dn = make_dn() + config_items = [Element('hi')] + api, mock = make_api_patch_call(self) + api.config_conf_mo(dn, config_items) + fields = {'dn': dn, 'cookie': api.cookie} + self.assertThat(mock, MockCalledOnceWith('configConfMo', fields, ANY)) + in_configs = mock.call_args[0][2] + self.assertEqual(config_items, in_configs[0][:]) + + +class TestCall(MAASTestCase): + """"Tests for ``UCSM_XML_API._call``.""" + + def test_call(self): + name = 'method' + fields = {1: 2} + children = [3, 4] + request = '' + response = Element('good') + api = make_api() + + mock_make_request_data = self.patch(ucsm, 'make_request_data') + mock_make_request_data.return_value = request + + mock_send_request = self.patch(api, '_send_request') + mock_send_request.return_value = response + + api._call(name, fields, children) + self.assertThat(mock_make_request_data, + MockCalledOnceWith(name, fields, children)) + self.assertThat(mock_send_request, MockCalledOnceWith(request)) + + +class TestSendRequest(MAASTestCase): + """"Tests for ``UCSM_XML_API._send_request``.""" + + def test_send_request(self): + request_data = 'foo' + api = make_api() + self.patch(api, '_call') + stream = StringIO('') + mock = self.patch(urllib2, 'urlopen') + mock.return_value = stream + response = api._send_request(request_data) + self.assertEqual('hi', response.tag) + urllib_request = mock.call_args[0][0] + self.assertEqual(request_data, urllib_request.data) + + +class TestConfigResolveDn(MAASTestCase): + """Tests for ``UCSM_XML_API.config_resolve_dn``.""" + + def test_parameters(self): + api, mock = make_api_patch_call(self) + test_dn = make_dn() + fields = {'cookie': api.cookie, 'dn': test_dn} + api.config_resolve_dn(test_dn) + self.assertThat(mock, + 
MockCalledOnceWith('configResolveDn', fields)) + + +class TestGetServers(MAASTestCase): + """Tests for ``get_servers``.""" + + def test_uses_uuid(self): + uuid = factory.make_UUID() + api = make_api() + mock = self.patch(api, 'config_resolve_class') + get_servers(api, uuid) + filters = mock.call_args[0][1] + attrib = {'class': 'computeItem', 'property': 'uuid', 'value': uuid} + self.assertEqual(attrib, filters[0].attrib) + + def test_returns_result(self): + uuid = factory.make_UUID() + api = make_api() + fake_result = make_fake_result('configResolveClass', 'found') + self.patch(api, 'config_resolve_class').return_value = fake_result + result = get_servers(api, uuid) + self.assertEqual('found', result[0].tag) + + def test_class_id(self): + uuid = factory.make_UUID() + api = make_api() + mock = self.patch(api, 'config_resolve_class') + get_servers(api, uuid) + self.assertThat(mock, MockCalledOnceWith('computeItem', ANY)) + + +class TestGetChildren(MAASTestCase): + """Tests for ``get_children``.""" + + def test_returns_result(self): + search_class = make_class() + api = make_api() + fake_result = make_fake_result('configResolveChildren', search_class) + self.patch(api, 'config_resolve_children').return_value = fake_result + in_element = Element('test', {'dn': make_dn()}) + class_id = search_class + result = get_children(api, in_element, class_id) + self.assertEqual(search_class, result[0].tag) + + def test_parameters(self): + search_class = make_class() + parent_dn = make_dn() + api = make_api() + mock = self.patch(api, 'config_resolve_children') + in_element = Element('test', {'dn': parent_dn}) + class_id = search_class + get_children(api, in_element, class_id) + self.assertThat(mock, MockCalledOnceWith(parent_dn, search_class)) + + +class TestGetMacs(MAASTestCase): + """Tests for ``get_macs``.""" + + def test_gets_adaptors(self): + adaptor = 'adaptor' + server = make_server() + mac = 'xx' + api = make_api() + mock = self.patch(ucsm, 'get_children') + + def 
fake_get_children(api, element, class_id): + if class_id == 'adaptorUnit': + return [adaptor] + elif class_id == 'adaptorHostEthIf': + return [Element('ethif', {'mac': mac})] + + mock.side_effect = fake_get_children + macs = get_macs(api, server) + self.assertThat(mock, MockCallsMatch( + call(api, server, 'adaptorUnit'), + call(api, adaptor, 'adaptorHostEthIf'))) + self.assertEqual([mac], macs) + + +class TestProbeServers(MAASTestCase): + """Tests for ``probe_servers``.""" + + def test_uses_api(self): + api = make_api() + mock = self.patch(ucsm, 'get_servers') + probe_servers(api) + self.assertThat(mock, MockCalledOnceWith(api)) + + def test_returns_results(self): + servers = [{'uuid': factory.make_UUID()}] + mac = 'mac' + api = make_api() + self.patch(ucsm, 'get_servers').return_value = servers + self.patch(ucsm, 'get_macs').return_value = [mac] + server_list = probe_servers(api) + self.assertEqual([(servers[0], [mac])], server_list) + + +class TestGetServerPowerControl(MAASTestCase): + """Tests for ``get_server_power_control``.""" + + def test_get_server_power_control(self): + api = make_api() + mock = self.patch(api, 'config_resolve_children') + fake_result = make_fake_result('configResolveChildren', 'lsPower') + mock.return_value = fake_result + dn = make_dn() + server = Element('computeItem', {'assignedToDn': dn}) + power_control = get_server_power_control(api, server) + self.assertThat(mock, MockCalledOnceWith(dn, 'lsPower')) + self.assertEqual('lsPower', power_control.tag) + + +class TestSetServerPowerControl(MAASTestCase): + """Tests for ``set_server_power_control``.""" + + def test_set_server_power_control(self): + api = make_api() + power_dn = make_dn() + power_control = Element('lsPower', {'dn': power_dn}) + config_conf_mo_mock = self.patch(api, 'config_conf_mo') + state = 'state' + set_server_power_control(api, power_control, state) + self.assertThat(config_conf_mo_mock, MockCalledOnceWith(power_dn, ANY)) + power_change = 
config_conf_mo_mock.call_args[0][1][0] + self.assertEqual(power_change.tag, 'lsPower') + self.assertEqual({'state': state, 'dn': power_dn}, power_change.attrib) + + +class TestLoggedIn(MAASTestCase): + """Tests for ``logged_in``.""" + + def test_logged_in(self): + mock = self.patch(ucsm, 'UCSM_XML_API') + url = 'url' + username = 'username' + password = 'password' + mock.return_value = Mock() + + with logged_in(url, username, password) as api: + self.assertEqual(mock.return_value, api) + self.assertThat(api.login, MockCalledOnceWith()) + + self.assertThat(mock.return_value.logout, MockCalledOnceWith()) + + +class TestValidGetPowerCommand(MAASTestCase): + scenarios = [ + ('Power On', dict( + power_mode='on', current_state='down', command='admin-up')), + ('Power On', dict( + power_mode='on', current_state='up', command='cycle-immediate')), + ('Power Off', dict( + power_mode='off', current_state='up', command='admin-down')), + ] + + def test_get_power_command(self): + command = get_power_command(self.power_mode, self.current_state) + self.assertEqual(self.command, command) + + +class TestInvalidGetPowerCommand(MAASTestCase): + + def test_get_power_command_raises_assertion_error_on_bad_power_mode(self): + bad_power_mode = factory.make_name('unlikely') + error = self.assertRaises(UCSM_XML_API_Error, get_power_command, + bad_power_mode, None) + self.assertIn(bad_power_mode, error.args[0]) + + +class TestPowerControlUCSM(MAASTestCase): + """Tests for ``power_control_ucsm``.""" + + def test_power_control_ucsm(self): + uuid = factory.make_UUID() + api = Mock() + self.patch(ucsm, 'UCSM_XML_API').return_value = api + get_servers_mock = self.patch(ucsm, 'get_servers') + server = make_server() + state = 'admin-down' + power_control = Element('lsPower', {'state': state}) + get_servers_mock.return_value = [server] + get_server_power_control_mock = self.patch(ucsm, + 'get_server_power_control') + get_server_power_control_mock.return_value = power_control + 
set_server_power_control_mock = self.patch(ucsm, + 'set_server_power_control') + power_control_ucsm('url', 'username', 'password', uuid, + 'off') + self.assertThat(get_servers_mock, MockCalledOnceWith(api, uuid)) + self.assertThat(set_server_power_control_mock, + MockCalledOnceWith(api, power_control, state)) + + +class TestUCSMPowerState(MAASTestCase): + """Tests for `power_state_ucsm`.""" + + def test_power_state_get_off(self): + url = factory.make_name('url') + username = factory.make_name('username') + password = factory.make_name('password') + uuid = factory.make_UUID() + api = Mock() + self.patch(ucsm, 'UCSM_XML_API').return_value = api + get_servers_mock = self.patch(ucsm, 'get_servers') + server = make_server() + current_state = 'down' + power_control = Element('lsPower', {'state': current_state}) + get_servers_mock.return_value = [server] + get_server_power_control_mock = self.patch( + ucsm, 'get_server_power_control') + get_server_power_control_mock.return_value = power_control + + power_state = power_state_ucsm(url, username, password, uuid) + self.expectThat(get_servers_mock, MockCalledOnceWith(api, uuid)) + self.expectThat( + get_server_power_control_mock, + MockCalledOnceWith(api, server)) + self.expectThat(power_state, Equals('off')) + + def test_power_state_get_on(self): + url = factory.make_name('url') + username = factory.make_name('username') + password = factory.make_name('password') + uuid = factory.make_UUID() + api = Mock() + self.patch(ucsm, 'UCSM_XML_API').return_value = api + get_servers_mock = self.patch(ucsm, 'get_servers') + server = make_server() + current_state = 'up' + power_control = Element('lsPower', {'state': current_state}) + get_servers_mock.return_value = [server] + get_server_power_control_mock = self.patch( + ucsm, 'get_server_power_control') + get_server_power_control_mock.return_value = power_control + + power_state = power_state_ucsm(url, username, password, uuid) + self.expectThat(get_servers_mock, 
MockCalledOnceWith(api, uuid)) + self.expectThat( + get_server_power_control_mock, + MockCalledOnceWith(api, server)) + self.expectThat(power_state, Equals('on')) + + def test_power_state_error_on_unknown_state(self): + url = factory.make_name('url') + username = factory.make_name('username') + password = factory.make_name('password') + uuid = factory.make_UUID() + api = Mock() + self.patch(ucsm, 'UCSM_XML_API').return_value = api + get_servers_mock = self.patch(ucsm, 'get_servers') + server = make_server() + current_state = factory.make_name('error') + power_control = Element('lsPower', {'state': current_state}) + get_servers_mock.return_value = [server] + get_server_power_control_mock = self.patch( + ucsm, 'get_server_power_control') + get_server_power_control_mock.return_value = power_control + + self.assertRaises( + UCSM_XML_API_Error, power_state_ucsm, url, + username, password, uuid) + + +class TestProbeAndEnlistUCSM(MAASTestCase): + """Tests for ``probe_and_enlist_ucsm``.""" + + def test_probe_and_enlist(self): + url = 'url' + username = 'username' + password = 'password' + api = Mock() + self.patch(ucsm, 'UCSM_XML_API').return_value = api + server_element = {'uuid': 'uuid'} + server = (server_element, ['mac'],) + probe_servers_mock = self.patch(ucsm, 'probe_servers') + probe_servers_mock.return_value = [server] + set_lan_boot_default_mock = self.patch(ucsm, 'set_lan_boot_default') + create_node_mock = self.patch(utils, 'create_node') + probe_and_enlist_ucsm(url, username, password) + self.assertThat(set_lan_boot_default_mock, + MockCalledOnceWith(api, server_element)) + self.assertThat(probe_servers_mock, MockCalledOnceWith(api)) + params = { + 'power_address': url, + 'power_user': username, + 'power_pass': password, + 'uuid': server[0]['uuid'] + } + self.assertThat(create_node_mock, + MockCalledOnceWith(server[1], 'amd64', 'ucsm', params)) + + +class TestGetServiceProfile(MAASTestCase): + """Tests for ``get_service_profile.``""" + + def 
test_get_service_profile(self): + test_dn = make_dn() + server = Element('computeBlade', {'assignedToDn': test_dn}) + api = make_api() + mock = self.patch(api, 'config_resolve_dn') + mock.return_value = make_fake_result('configResolveDn', 'lsServer', + 'outConfig') + service_profile = get_service_profile(api, server) + self.assertThat(mock, MockCalledOnceWith(test_dn)) + self.assertEqual(mock.return_value[0], service_profile) + + +def make_boot_order_scenarios(size): + """Produce test scenarios for testing get_first_booter. + + Each scenario is one of the permutations of a set of ``size`` + elements, where each element has an integer 'order' attribute + that get_first_booter will use to determine which device boots + first. + """ + minimum = random.randint(0, 500) + ordinals = xrange(minimum, minimum + size) + + elements = [ + Element('Entry%d' % i, {'order': '%d' % i}) + for i in ordinals + ] + + orders = permutations(elements) + orders = [{'order': order} for order in orders] + + scenarios = [('%d' % i, order) for i, order in enumerate(orders)] + return scenarios, minimum + + +class TestGetFirstBooter(MAASTestCase): + """Tests for ``get_first_booter.``""" + + scenarios, minimum = make_boot_order_scenarios(3) + + def test_first_booter(self): + """Ensure the boot device is picked according to the order + attribute, not the order of elements in the list of devices.""" + root = Element('outConfigs') + root.extend(self.order) + picked = get_first_booter(root) + self.assertEqual(self.minimum, int(picked.get('order'))) + + +class TestsForStripRoKeys(MAASTestCase): + """Tests for ``strip_ro_keys.``""" + + def test_strip_ro_keys(self): + attributes = {key: 'DC' for key in RO_KEYS} + + elements = [ + Element('Element%d' % i, attributes) + for i in xrange(random.randint(0, 10)) + ] + + strip_ro_keys(elements) + + for key in RO_KEYS: + values = [element.get(key) for element in elements] + for value in values: + self.assertIsNone(value) + + +class 
TestMakePolicyChange(MAASTestCase): + """Tests for ``make_policy_change``.""" + + def test_lan_already_top_priority(self): + boot_profile_response = make_fake_result('configResolveChildren', + 'lsbootLan') + mock = self.patch(ucsm, 'get_first_booter') + mock.return_value = boot_profile_response[0] + change = make_policy_change(boot_profile_response) + self.assertIsNone(change) + self.assertThat(mock, MockCalledOnceWith(boot_profile_response)) + + def test_change_lan_to_top_priority(self): + boot_profile_response = Element('outConfigs') + lan_boot = Element('lsbootLan', {'order': 'second'}) + storage_boot = Element('lsbootStorage', {'order': 'first'}) + boot_profile_response.extend([lan_boot, storage_boot]) + self.patch(ucsm, 'get_first_booter').return_value = storage_boot + self.patch(ucsm, 'strip_ro_keys') + change = make_policy_change(boot_profile_response) + lan_boot_order = change.xpath('//lsbootPolicy/lsbootLan/@order') + storage_boot_order = \ + change.xpath('//lsbootPolicy/lsbootStorage/@order') + self.assertEqual(['first'], lan_boot_order) + self.assertEqual(['second'], storage_boot_order) + + +class TestSetLanBootDefault(MAASTestCase): + """Tets for ``set_lan_boot_default.``""" + + def test_no_change(self): + api = make_api() + server = make_server() + self.patch(ucsm, 'get_service_profile') + self.patch(api, 'config_resolve_children') + self.patch(ucsm, 'make_policy_change').return_value = None + config_conf_mo = self.patch(api, 'config_conf_mo') + set_lan_boot_default(api, server) + self.assertThat(config_conf_mo, MockNotCalled()) + + def test_with_change(self): + api = make_api() + server = make_server() + test_dn = make_dn() + test_change = 'change' + service_profile = Element('test', {'operBootPolicyName': test_dn}) + self.patch(ucsm, 'get_service_profile').return_value = service_profile + self.patch(api, 'config_resolve_children') + self.patch(ucsm, 'make_policy_change').return_value = test_change + config_conf_mo = self.patch(api, 'config_conf_mo') 
+ set_lan_boot_default(api, server) + self.assertThat(config_conf_mo, + MockCalledOnceWith(test_dn, [test_change])) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/tests/test_virsh.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/tests/test_virsh.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/tests/test_virsh.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/tests/test_virsh.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,413 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `provisioningserver.drivers.hardware.virsh`. +""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import random +from textwrap import dedent + +from maastesting.factory import factory +from maastesting.matchers import ( + MockCalledOnceWith, + MockCallsMatch, + ) +from maastesting.testcase import MAASTestCase +from mock import call +from provisioningserver.drivers.hardware import virsh +import provisioningserver.utils as utils + + +SAMPLE_IFLIST = dedent(""" + Interface Type Source Model MAC + ------------------------------------------------------- + - bridge br0 e1000 %s + - bridge br1 e1000 %s + """) + +SAMPLE_DUMPXML = dedent(""" + + test + 4096576 + 4096576 + 1 + + hvm + + + + """) + + +class TestVirshSSH(MAASTestCase): + """Tests for `VirshSSH`.""" + + def configure_virshssh_pexpect(self, inputs=None, dom_prefix=None): + """Configures the VirshSSH class to use 'cat' process + for testing instead of the actual virsh.""" + conn = virsh.VirshSSH(timeout=0.1, dom_prefix=dom_prefix) + self.addCleanup(conn.close) + self.patch(conn, '_execute') + conn._spawn('cat') + if inputs is not None: + for line in inputs: + conn.sendline(line) + return conn + + def configure_virshssh(self, output, 
dom_prefix=None): + self.patch(virsh.VirshSSH, 'run').return_value = output + return virsh.VirshSSH(dom_prefix=dom_prefix) + + def test_login_prompt(self): + virsh_outputs = [ + 'virsh # ' + ] + conn = self.configure_virshssh_pexpect(virsh_outputs) + self.assertTrue(conn.login(poweraddr=None)) + + def test_login_with_sshkey(self): + virsh_outputs = [ + "The authenticity of host '127.0.0.1' can't be established.", + "ECDSA key fingerprint is " + "00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff.", + "Are you sure you want to continue connecting (yes/no)? ", + ] + conn = self.configure_virshssh_pexpect(virsh_outputs) + mock_sendline = self.patch(conn, 'sendline') + conn.login(poweraddr=None) + self.assertThat(mock_sendline, MockCalledOnceWith('yes')) + + def test_login_with_password(self): + virsh_outputs = [ + "ubuntu@%s's password: " % factory.make_ipv4_address(), + ] + conn = self.configure_virshssh_pexpect(virsh_outputs) + fake_password = factory.make_name('password') + mock_sendline = self.patch(conn, 'sendline') + conn.login(poweraddr=None, password=fake_password) + self.assertThat(mock_sendline, MockCalledOnceWith(fake_password)) + + def test_login_missing_password(self): + virsh_outputs = [ + "ubuntu@%s's password: " % factory.make_ipv4_address(), + ] + conn = self.configure_virshssh_pexpect(virsh_outputs) + mock_close = self.patch(conn, 'close') + self.assertFalse(conn.login(poweraddr=None, password=None)) + mock_close.assert_called() + + def test_login_invalid(self): + virsh_outputs = [ + factory.make_string(), + ] + conn = self.configure_virshssh_pexpect(virsh_outputs) + mock_close = self.patch(conn, 'close') + self.assertFalse(conn.login(poweraddr=None)) + mock_close.assert_called() + + def test_logout(self): + conn = self.configure_virshssh_pexpect() + mock_sendline = self.patch(conn, 'sendline') + mock_close = self.patch(conn, 'close') + conn.logout() + self.assertThat(mock_sendline, MockCalledOnceWith('quit')) + mock_close.assert_called() + + def 
test_prompt(self): + virsh_outputs = [ + 'virsh # ' + ] + conn = self.configure_virshssh_pexpect(virsh_outputs) + self.assertTrue(conn.prompt()) + + def test_invalid_prompt(self): + virsh_outputs = [ + factory.make_string() + ] + conn = self.configure_virshssh_pexpect(virsh_outputs) + self.assertFalse(conn.prompt()) + + def test_run(self): + cmd = ['list', '--all', '--name'] + expected = ' '.join(cmd) + names = [factory.make_name('machine') for _ in range(3)] + conn = self.configure_virshssh_pexpect() + conn.before = '\n'.join([expected] + names) + mock_sendline = self.patch(conn, 'sendline') + mock_prompt = self.patch(conn, 'prompt') + output = conn.run(cmd) + self.assertThat(mock_sendline, MockCalledOnceWith(expected)) + mock_prompt.assert_called() + self.assertEqual('\n'.join(names), output) + + def test_list(self): + names = [factory.make_name('machine') for _ in range(3)] + conn = self.configure_virshssh('\n'.join(names)) + expected = conn.list() + self.assertItemsEqual(names, expected) + + def test_list_dom_prefix(self): + prefix = 'dom_prefix' + names = [prefix + factory.make_name('machine') for _ in range(3)] + conn = self.configure_virshssh('\n'.join(names), dom_prefix=prefix) + expected = conn.list() + self.assertItemsEqual(names, expected) + + def test_get_state(self): + state = factory.make_name('state') + conn = self.configure_virshssh(state) + expected = conn.get_state('') + self.assertEqual(state, expected) + + def test_get_state_error(self): + conn = self.configure_virshssh('error') + expected = conn.get_state('') + self.assertEqual(None, expected) + + def test_mac_addresses_returns_list(self): + macs = [factory.make_mac_address() for _ in range(2)] + output = SAMPLE_IFLIST % (macs[0], macs[1]) + conn = self.configure_virshssh(output) + expected = conn.get_mac_addresses('') + self.assertEqual(macs, expected) + + def test_get_arch_returns_valid(self): + arch = factory.make_name('arch') + output = SAMPLE_DUMPXML % arch + conn = 
self.configure_virshssh(output) + expected = conn.get_arch('') + self.assertEqual(arch, expected) + + def test_get_arch_returns_valid_fixed(self): + arch = random.choice(virsh.ARCH_FIX.keys()) + fixed_arch = virsh.ARCH_FIX[arch] + output = SAMPLE_DUMPXML % arch + conn = self.configure_virshssh(output) + expected = conn.get_arch('') + self.assertEqual(fixed_arch, expected) + + +class TestVirsh(MAASTestCase): + """Tests for `probe_virsh_and_enlist`.""" + + def test_probe_and_enlist(self): + # Patch VirshSSH list so that some machines are returned + # with some fake architectures. + machines = [factory.make_name('machine') for _ in range(3)] + self.patch(virsh.VirshSSH, 'list').return_value = machines + fake_arch = factory.make_name('arch') + mock_arch = self.patch(virsh.VirshSSH, 'get_arch') + mock_arch.return_value = fake_arch + + # Patch get_state so that one of the machines is on, so we + # can check that it will be forced off. + fake_states = [ + virsh.VirshVMState.ON, + virsh.VirshVMState.OFF, + virsh.VirshVMState.OFF + ] + mock_state = self.patch(virsh.VirshSSH, 'get_state') + mock_state.side_effect = fake_states + + # Setup the power parameters that we should expect to be + # the output of the probe_and_enlist + fake_password = factory.make_string() + poweraddr = factory.make_name('poweraddr') + called_params = [] + fake_macs = [] + for machine in machines: + macs = [factory.make_mac_address() for _ in range(3)] + fake_macs.append(macs) + called_params.append({ + 'power_address': poweraddr, + 'power_id': machine, + 'power_pass': fake_password, + }) + + # Patch the get_mac_addresses so we get a known list of + # mac addresses for each machine. + mock_macs = self.patch(virsh.VirshSSH, 'get_mac_addresses') + mock_macs.side_effect = fake_macs + + # Patch the poweroff and create as we really don't want these + # actions to occur, but want to also check that they are called. 
+ mock_poweroff = self.patch(virsh.VirshSSH, 'poweroff') + mock_create = self.patch(utils, 'create_node') + + # Patch login and logout so that we don't really contact + # a server at the fake poweraddr + mock_login = self.patch(virsh.VirshSSH, 'login') + mock_login.return_value = True + mock_logout = self.patch(virsh.VirshSSH, 'logout') + + # Perform the probe and enlist + virsh.probe_virsh_and_enlist(poweraddr, password=fake_password) + + # Check that login was called with the provided poweraddr and + # password. + self.assertThat( + mock_login, MockCalledOnceWith(poweraddr, fake_password)) + + # The first machine should have poweroff called on it, as it + # was initial in the on state. + self.assertThat( + mock_poweroff, MockCalledOnceWith(machines[0])) + + # Check that the create command had the correct parameters for + # each machine. + self.assertThat( + mock_create, MockCallsMatch( + call(fake_macs[0], fake_arch, 'virsh', called_params[0]), + call(fake_macs[1], fake_arch, 'virsh', called_params[1]), + call(fake_macs[2], fake_arch, 'virsh', called_params[2]))) + mock_logout.assert_called() + + def test_probe_and_enlist_login_failure(self): + mock_login = self.patch(virsh.VirshSSH, 'login') + mock_login.return_value = False + self.assertRaises( + virsh.VirshError, virsh.probe_virsh_and_enlist, + factory.make_name('poweraddr'), password=factory.make_string()) + + +class TestVirshPowerControl(MAASTestCase): + """Tests for `power_control_virsh`.""" + + def test_power_control_login_failure(self): + mock_login = self.patch(virsh.VirshSSH, 'login') + mock_login.return_value = False + self.assertRaises( + virsh.VirshError, virsh.power_control_virsh, + factory.make_name('poweraddr'), factory.make_name('machine'), + 'on', password=factory.make_string()) + + def test_power_control_on(self): + mock_login = self.patch(virsh.VirshSSH, 'login') + mock_login.return_value = True + mock_state = self.patch(virsh.VirshSSH, 'get_state') + mock_state.return_value = 
virsh.VirshVMState.OFF + mock_poweron = self.patch(virsh.VirshSSH, 'poweron') + + poweraddr = factory.make_name('poweraddr') + machine = factory.make_name('machine') + virsh.power_control_virsh(poweraddr, machine, 'on') + + self.assertThat( + mock_login, MockCalledOnceWith(poweraddr, None)) + self.assertThat( + mock_state, MockCalledOnceWith(machine)) + self.assertThat( + mock_poweron, MockCalledOnceWith(machine)) + + def test_power_control_off(self): + mock_login = self.patch(virsh.VirshSSH, 'login') + mock_login.return_value = True + mock_state = self.patch(virsh.VirshSSH, 'get_state') + mock_state.return_value = virsh.VirshVMState.ON + mock_poweroff = self.patch(virsh.VirshSSH, 'poweroff') + + poweraddr = factory.make_name('poweraddr') + machine = factory.make_name('machine') + virsh.power_control_virsh(poweraddr, machine, 'off') + + self.assertThat( + mock_login, MockCalledOnceWith(poweraddr, None)) + self.assertThat( + mock_state, MockCalledOnceWith(machine)) + self.assertThat( + mock_poweroff, MockCalledOnceWith(machine)) + + def test_power_control_bad_domain(self): + mock_login = self.patch(virsh.VirshSSH, 'login') + mock_login.return_value = True + mock_state = self.patch(virsh.VirshSSH, 'get_state') + mock_state.return_value = None + + poweraddr = factory.make_name('poweraddr') + machine = factory.make_name('machine') + self.assertRaises( + virsh.VirshError, virsh.power_control_virsh, + poweraddr, machine, 'on') + + def test_power_control_power_failure(self): + mock_login = self.patch(virsh.VirshSSH, 'login') + mock_login.return_value = True + mock_state = self.patch(virsh.VirshSSH, 'get_state') + mock_state.return_value = virsh.VirshVMState.ON + mock_poweroff = self.patch(virsh.VirshSSH, 'poweroff') + mock_poweroff.return_value = False + + poweraddr = factory.make_name('poweraddr') + machine = factory.make_name('machine') + self.assertRaises( + virsh.VirshError, virsh.power_control_virsh, + poweraddr, machine, 'off') + + +class 
TestVirshPowerState(MAASTestCase): + """Tests for `power_state_virsh`.""" + + def test_power_state_login_failure(self): + mock_login = self.patch(virsh.VirshSSH, 'login') + mock_login.return_value = False + self.assertRaises( + virsh.VirshError, virsh.power_state_virsh, + factory.make_name('poweraddr'), factory.make_name('machine'), + password=factory.make_string()) + + def test_power_state_get_on(self): + mock_login = self.patch(virsh.VirshSSH, 'login') + mock_login.return_value = True + mock_state = self.patch(virsh.VirshSSH, 'get_state') + mock_state.return_value = virsh.VirshVMState.ON + + poweraddr = factory.make_name('poweraddr') + machine = factory.make_name('machine') + self.assertEqual( + 'on', virsh.power_state_virsh(poweraddr, machine)) + + def test_power_state_get_off(self): + mock_login = self.patch(virsh.VirshSSH, 'login') + mock_login.return_value = True + mock_state = self.patch(virsh.VirshSSH, 'get_state') + mock_state.return_value = virsh.VirshVMState.OFF + + poweraddr = factory.make_name('poweraddr') + machine = factory.make_name('machine') + self.assertEqual( + 'off', virsh.power_state_virsh(poweraddr, machine)) + + def test_power_control_bad_domain(self): + mock_login = self.patch(virsh.VirshSSH, 'login') + mock_login.return_value = True + mock_state = self.patch(virsh.VirshSSH, 'get_state') + mock_state.return_value = None + + poweraddr = factory.make_name('poweraddr') + machine = factory.make_name('machine') + self.assertRaises( + virsh.VirshError, virsh.power_state_virsh, + poweraddr, machine) + + def test_power_state_error_on_unknown_state(self): + mock_login = self.patch(virsh.VirshSSH, 'login') + mock_login.return_value = True + mock_state = self.patch(virsh.VirshSSH, 'get_state') + mock_state.return_value = 'unknown' + + poweraddr = factory.make_name('poweraddr') + machine = factory.make_name('machine') + self.assertRaises( + virsh.VirshError, virsh.power_state_virsh, + poweraddr, machine) diff -Nru 
maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/ucsm.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/ucsm.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/ucsm.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/ucsm.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,461 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Support for managing nodes via Cisco UCS Manager's HTTP-XML API. + +It's useful to have a cursory understanding of how UCS Manager XML API +works. Cisco has a proprietary document that describes all of this in +more detail, and I would suggest you get a copy of that if you want more +information than is provided here. + +The Cisco DevNet website for UCS Manager has a link to the document, +which is behind a login wall, and links to example UCS queries: + +https://developer.cisco.com/web/unifiedcomputing/home + +UCS Manager is a tool for managing servers. It provides an XML API for +external applications to use to interact with UCS Manager to manage +servers. The API is available via HTTP, and requests and responses are +made of XML strings. MAAS's code for interacting with a UCS Manager is +concerned with building these requests, sending them to UCS Manager, and +processing the responses. + +UCS Manager stores information in a hierarchical structure known as the +management information tree. This structure is exposed via the XML API, +where we can manipulate objects in the tree by finding them, reading +them, and writing them. + +Some definitions for terms that are used in this code: + +Boot Policy - Controls the boot order for a server. Each service profile +is associated with a boot policy. + +Distinguished Name (DN) - Each object in UCS has a unique DN, which +describes its position in the tree. 
This is like a fully qualified path, +and provides a way for objects to reference other objects at other +places in the tree, or for API users to look up specific objects in the +tree. + +Class - Classes define the properties and states of objects. An object's +class is given in its tag name. + +Managed Object (MO) - An object in the management information tree. +Objects are recursive, and may have children of multiple types. With the +exception of the root object, all objects have parents. In the XML API, +objects are represented as XML elements. + +Method - Actions performed by the API on managed objects. These can +change state, or read the current state, or both. + +Server - A physical server managed by UCS Manager. Servers must be +associated with service profiles in order to be used. + +Service Profile - A set of configuration options for a server. Service +profiles define the server's personality, and can be migrated from +server to server. Service profiles describe boot policy, MAC addresses, +network connectivity, IPMI configuration, and more. MAAS requires +servers to be associated with service profiles. + +UUID - The UUID for a server. MAAS persists the UUID of each UCS managed +server it enlists, and uses it as a key for looking the server up later. 
+""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +import contextlib +import urllib2 +import urlparse + +from lxml.etree import ( + Element, + tostring, + XML, + ) +import provisioningserver.utils as utils + + +str = None + +__metaclass__ = type +__all__ = [ + 'power_control_ucsm', + 'power_state_ucsm', + 'probe_and_enlist_ucsm', +] + + +class UCSMState: + DOWN = "down" + UP = "up" + + +class UCSM_XML_API_Error(Exception): + """Failure talking to a Cisco UCS Manager.""" + + def __init__(self, msg, code): + super(UCSM_XML_API_Error, self).__init__(msg) + self.code = code + + +def make_request_data(name, fields=None, children=None): + """Build a request string for an API method.""" + root = Element(name, fields) + if children is not None: + root.extend(children) + return tostring(root) + + +def parse_response(response_string): + """Parse the response from an API method.""" + doc = XML(response_string) + + error_code = doc.get('errorCode') + if error_code is not None: + raise UCSM_XML_API_Error(doc.get('errorDescr'), error_code) + + return doc + + +class UCSM_XML_API: + """Provides access to a Cisco UCS Manager's XML API. Public methods + on this class correspond to UCS Manager XML API methods. + + Each request uses a new connection. The server supports keep-alive, + so this client could be optimized to use it too. 
+ """ + + def __init__(self, url, username, password): + self.url = url + self.api_url = urlparse.urljoin(self.url, 'nuova') + self.username = username + self.password = password + self.cookie = None + + def _send_request(self, request_data): + """Issue a request via HTTP and parse the response.""" + request = urllib2.Request(self.api_url, request_data) + response = urllib2.urlopen(request) + response_text = response.read() + response_doc = parse_response(response_text) + return response_doc + + def _call(self, name, fields=None, children=None): + request_data = make_request_data(name, fields, children) + response = self._send_request(request_data) + return response + + def login(self): + """Login to the API and get a cookie. + + Logging into the API gives a new cookie in response. The cookie + will become inactive after it has been inactive for some amount + of time (10 minutes is the default.) + + UCS Manager allows a limited number of active cookies at any + point in time, so it's important to free the cookie up when + finished by logging out via the ``logout`` method. + """ + fields = {'inName': self.username, 'inPassword': self.password} + response = self._call('aaaLogin', fields) + self.cookie = response.get('outCookie') + + def logout(self): + """Logout from the API and free the cookie.""" + fields = {'inCookie': self.cookie} + self._call('aaaLogout', fields) + self.cookie = None + + def config_resolve_class(self, class_id, filters=None): + """Issue a configResolveClass request. + + This returns all of the objects of class ``class_id`` from the + UCS Manager. + + Filters provide a way of limiting the classes returned according + to their attributes. There are a number of filters available - + Cisco's XML API documentation has a full chapter on filters. + All we care about here is that filters are described with XML + elements. 
+ """ + fields = {'cookie': self.cookie, 'classId': class_id} + + in_filters = Element('inFilter') + if filters: + in_filters.extend(filters) + + return self._call('configResolveClass', fields, [in_filters]) + + def config_resolve_children(self, dn, class_id=None): + """Issue a configResolveChildren request. + + This returns all of the children of the object named by ``dn``, + or if ``class_id`` is not None, all of the children of type + ``class_id``. + """ + fields = {'cookie': self.cookie, 'inDn': dn} + if class_id is not None: + fields['classId'] = class_id + return self._call('configResolveChildren', fields) + + def config_resolve_dn(self, dn): + """Retrieve a single object by name. + + This returns the object named by ``dn``, but not its children. + """ + fields = {'cookie': self.cookie, 'dn': dn} + return self._call('configResolveDn', fields) + + def config_conf_mo(self, dn, config_items): + """Issue a configConfMo request. + + This makes a configuration change on an object (MO). + """ + fields = {'cookie': self.cookie, 'dn': dn} + + in_configs = Element('inConfig') + in_configs.extend(config_items) + + self._call('configConfMo', fields, [in_configs]) + + +def get_servers(api, uuid=None): + """Retrieve a list of servers from the UCS Manager.""" + if uuid: + attrs = {'class': 'computeItem', 'property': 'uuid', 'value': uuid} + filters = [Element('eq', attrs)] + else: + filters = None + + resolved = api.config_resolve_class('computeItem', filters) + return resolved.xpath('//outConfigs/*') + + +def get_children(api, element, class_id): + """Retrieve a list of child elements from the UCS Manager.""" + resolved = api.config_resolve_children(element.get('dn'), class_id) + return resolved.xpath('//outConfigs/%s' % class_id) + + +def get_macs(api, server): + """Retrieve the list of MAC addresses assigned to a server. + + Network interfaces are represented by 'adaptorUnit' objects, and + are stored as children of servers. 
+ """ + adaptors = get_children(api, server, 'adaptorUnit') + + macs = [] + for adaptor in adaptors: + host_eth_ifs = get_children(api, adaptor, 'adaptorHostEthIf') + macs.extend([h.get('mac') for h in host_eth_ifs]) + + return macs + + +def probe_servers(api): + """Retrieve the UUID and MAC addresses for servers from the UCS Manager.""" + servers = get_servers(api) + server_list = [(s, get_macs(api, s)) for s in servers] + return server_list + + +def get_server_power_control(api, server): + """Retrieve the power control object for a server.""" + service_profile_dn = server.get('assignedToDn') + resolved = api.config_resolve_children(service_profile_dn, 'lsPower') + power_controls = resolved.xpath('//outConfigs/lsPower') + return power_controls[0] + + +def set_server_power_control(api, power_control, command): + """Issue a power command to a server's power control.""" + attrs = {'state': command, 'dn': power_control.get('dn')} + power_change = Element('lsPower', attrs) + api.config_conf_mo(power_control.get('dn'), [power_change]) + + +def get_service_profile(api, server): + """Get the server's assigned service profile.""" + service_profile_dn = server.get('assignedToDn') + result = api.config_resolve_dn(service_profile_dn) + service_profile = result.xpath('//outConfig/lsServer')[0] + return service_profile + + +def get_first_booter(boot_profile_response): + """Find the device currently set to boot by default.""" + # The 'order' attribue is a positive integer. The device with the + # lowest order gets booted first. + orders = boot_profile_response.xpath('//outConfigs/*/@order') + ordinals = map(int, orders) + top_boot_order = min(ordinals) + first_query = '//outConfigs/*[@order=%s]' % top_boot_order + current_first = boot_profile_response.xpath(first_query)[0] + return current_first + + +RO_KEYS = ['access', 'type'] + + +def strip_ro_keys(elements): + """Remove read-only keys from configuration elements. 
+ + These are keys for attributes that aren't allowed to be changed via + configConfMo request. They are included in MO's that we read from the + API; stripping these attributes lets us reuse the elements for those + MO's rather than building new ones from scratch. + """ + for ro_key in RO_KEYS: + for element in elements: + del(element.attrib[ro_key]) + + +def make_policy_change(boot_profile_response): + """Build the policy change tree required to make LAN boot first + priority. + + The original top priority will be swapped with LAN boot's original + priority. + """ + current_first = get_first_booter(boot_profile_response) + lan_boot = boot_profile_response.xpath('//outConfigs/lsbootLan')[0] + + if current_first == lan_boot: + return + + top_boot_order = current_first.get('order') + current_first.set('order', lan_boot.get('order')) + lan_boot.set('order', top_boot_order) + + elements = [current_first, lan_boot] + strip_ro_keys(elements) + policy_change = Element('lsbootPolicy') + policy_change.extend(elements) + return policy_change + + +def set_lan_boot_default(api, server): + """Set a server to boot via LAN by default. + + If LAN boot is already the top priority, no change will + be made. + + This command changes the server's boot profile, which will affect + any other servers also using that boot profile. This is ok, because + probe and enlist enlists all the servers in the chassis. 
+ """ + service_profile = get_service_profile(api, server) + boot_profile_dn = service_profile.get('operBootPolicyName') + response = api.config_resolve_children(boot_profile_dn) + policy_change = make_policy_change(response) + if policy_change is None: + return + api.config_conf_mo(boot_profile_dn, [policy_change]) + + +@contextlib.contextmanager +def logged_in(url, username, password): + """Context manager that ensures the logout from the API occurs.""" + api = UCSM_XML_API(url, username, password) + api.login() + try: + yield api + finally: + api.logout() + + +def get_power_command(maas_power_mode, current_state): + """Translate a MAAS on/off state into a UCSM power command. + + If the node is up already and receives a request to power on, power + cycle the node. + """ + if maas_power_mode == 'on': + if current_state == 'up': + return 'cycle-immediate' + return 'admin-up' + elif maas_power_mode == 'off': + return 'admin-down' + else: + raise UCSM_XML_API_Error( + 'Unexpected maas power mode: %s' % (maas_power_mode), None) + + +def power_control_ucsm(url, username, password, uuid, maas_power_mode): + """Handle calls from the power template for nodes with a power type + of 'ucsm'. + """ + with logged_in(url, username, password) as api: + # UUIDs are unique per server, so we get either one or zero + # servers for a given UUID. + [server] = get_servers(api, uuid) + power_control = get_server_power_control(api, server) + command = get_power_command(maas_power_mode, + power_control.get('state')) + set_server_power_control(api, power_control, command) + + +def power_state_ucsm(url, username, password, uuid): + """Return the power state for the ucsm machine.""" + with logged_in(url, username, password) as api: + # UUIDs are unique per server, so we get either one or zero + # servers for a given UUID. 
+ [server] = get_servers(api, uuid) + power_control = get_server_power_control(api, server) + power_state = power_control.get('state') + + if power_state == UCSMState.DOWN: + return 'off' + elif power_state == UCSMState.UP: + return 'on' + raise UCSM_XML_API_Error( + 'Unknown power state: %s' % power_state, None) + + +def probe_and_enlist_ucsm(url, username, password): + """Probe a UCS Manager and enlist all its servers. + + Here's what happens here: 1. Get a list of servers from the UCS + Manager, along with their MAC addresses. + + 2. Configure each server to boot from LAN first. + + 3. Add each server to MAAS as a new node, with a power control + method of 'ucsm'. The URL and credentials supplied are persisted + with each node so MAAS knows how to access UCSM to manage the node + in the future. + + This code expects each server in the system to have already been + associated with a service profile. The servers must have networking + configured, and their boot profiles must include a boot from LAN + option. During enlistment, the boot profile for each service profile + used by a server will be modified to move LAN boot to the highest + priority boot option. + + Also, if any node fails to enlist, this enlistment process will + stop and won't attempt to enlist any additional nodes. If a node is + already known to MAAS, it will fail to enlist, so all nodes must be + added at once. + + There is also room for optimization during enlistment. While our + client deals with a single server at a time, the API is capable + of reading/writing the settings of multiple servers in the same + request. 
+ """ + with logged_in(url, username, password) as api: + servers = probe_servers(api) + for server, _ in servers: + set_lan_boot_default(api, server) + + for server, macs in servers: + params = { + 'power_address': url, + 'power_user': username, + 'power_pass': password, + 'uuid': server.get('uuid'), + } + utils.create_node(macs, 'amd64', 'ucsm', params) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/virsh.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/virsh.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/hardware/virsh.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/hardware/virsh.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,267 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'probe_virsh_and_enlist', + ] + +from lxml import etree +import pexpect +import provisioningserver.utils as utils + + +XPATH_ARCH = "/domain/os/type/@arch" + +# Virsh stores the architecture with a different +# label then MAAS. This maps virsh architecture to +# MAAS architecture. +ARCH_FIX = { + 'x86_64': 'amd64', + 'ppc64': 'ppc64el', + } + + +class VirshVMState: + OFF = "shut off" + ON = "running" + NO_STATE = "no state" + IDLE = "idle" + PAUSED = "paused" + IN_SHUTDOWN = "in shutdown" + CRASHED = "crashed" + PM_SUSPENDED = "pmsuspended" + + +VM_STATE_TO_POWER_STATE = { + VirshVMState.OFF: "off", + VirshVMState.ON: "on", + VirshVMState.NO_STATE: "off", + VirshVMState.IDLE: "off", + VirshVMState.PAUSED: "off", + VirshVMState.IN_SHUTDOWN: "on", + VirshVMState.CRASHED: "off", + VirshVMState.PM_SUSPENDED: "off", + } + + +class VirshError(Exception): + """Failure communicating to virsh. 
""" + + +class VirshSSH(pexpect.spawn): + + PROMPT = r"virsh \#" + PROMPT_SSHKEY = "(?i)are you sure you want to continue connecting" + PROMPT_PASSWORD = "(?i)(?:password)|(?:passphrase for key)" + PROMPT_DENIED = "(?i)permission denied" + PROMPT_CLOSED = "(?i)connection closed by remote host" + + PROMPTS = [ + PROMPT_SSHKEY, + PROMPT_PASSWORD, + PROMPT, + PROMPT_DENIED, + PROMPT_CLOSED, + pexpect.TIMEOUT, + pexpect.EOF, + ] + + I_PROMPT = PROMPTS.index(PROMPT) + I_PROMPT_SSHKEY = PROMPTS.index(PROMPT_SSHKEY) + I_PROMPT_PASSWORD = PROMPTS.index(PROMPT_PASSWORD) + + def __init__(self, timeout=30, maxread=2000, dom_prefix=None): + super(VirshSSH, self).__init__( + None, timeout=timeout, maxread=maxread) + self.name = '' + if dom_prefix is None: + self.dom_prefix = '' + else: + self.dom_prefix = dom_prefix + + def _execute(self, poweraddr): + """Spawns the pexpect command.""" + cmd = 'virsh --connect %s' % poweraddr + self._spawn(cmd) + + def login(self, poweraddr, password=None): + """Starts connection to virsh.""" + self._execute(poweraddr) + i = self.expect(self.PROMPTS, timeout=min(10, self.timeout)) + if i == self.I_PROMPT_SSHKEY: + # New certificate, lets always accept but if + # it changes it will fail to login. + self.sendline("yes") + i = self.expect(self.PROMPTS) + if i == self.I_PROMPT_PASSWORD: + # Requesting password, give it if available. + if password is None: + self.close() + return False + self.sendline(password) + i = self.expect(self.PROMPTS) + if i != self.I_PROMPT: + # Something bad happened, either disconnect, + # timeout, wrong password. 
+ self.close() + return False + return True + + def logout(self): + """Quits the virsh session.""" + self.sendline("quit") + self.close() + + def prompt(self, timeout=None): + """Waits for virsh prompt.""" + if timeout is None: + timeout = self.timeout + i = self.expect([self.PROMPT, pexpect.TIMEOUT], timeout=timeout) + if i == 1: + return False + return True + + def run(self, args): + cmd = ' '.join(args) + self.sendline(cmd) + self.prompt() + result = self.before.splitlines() + return '\n'.join(result[1:]) + + def list(self): + """Lists all virtual machines by name.""" + machines = self.run(['list', '--all', '--name']) + machines = machines.strip().splitlines() + return [m for m in machines if m.startswith(self.dom_prefix)] + + def get_state(self, machine): + """Gets the virtual machine state.""" + state = self.run(['domstate', machine]) + state = state.strip() + if 'error' in state: + return None + return state + + def get_mac_addresses(self, machine): + """Gets list of mac addressess assigned to the virtual machine.""" + output = self.run(['domiflist', machine]).strip() + if 'error' in output: + return None + output = output.splitlines()[2:] + return [line.split()[4] for line in output] + + def get_arch(self, machine): + """Gets the virtual machine architecture.""" + output = self.run(['dumpxml', machine]).strip() + if 'error' in output: + return None + + doc = etree.XML(output) + evaluator = etree.XPathEvaluator(doc) + arch = evaluator(XPATH_ARCH)[0] + + # Fix architectures that need to be referenced by a different + # name, that MAAS understands. 
+ return ARCH_FIX.get(arch, arch) + + def poweron(self, machine): + """Poweron a virtual machine.""" + output = self.run(['start', machine]).strip() + if 'error' in output: + return False + return True + + def poweroff(self, machine): + """Poweroff a virtual machine.""" + output = self.run(['destroy', machine]).strip() + if 'error' in output: + return False + return True + + +def probe_virsh_and_enlist(poweraddr, password=None, prefix_filter=None): + """Extracts all of the virtual machines from virsh and enlists them + into MAAS. + + :param poweraddr: virsh connection string + """ + conn = VirshSSH(dom_prefix=prefix_filter) + if not conn.login(poweraddr, password): + raise VirshError('Failed to login to virsh console.') + + for machine in conn.list(): + arch = conn.get_arch(machine) + state = conn.get_state(machine) + macs = conn.get_mac_addresses(machine) + + # Force the machine off, as MAAS will control the machine + # and it needs to be in a known state of off. + if state == VirshVMState.ON: + conn.poweroff(machine) + + params = { + 'power_address': poweraddr, + 'power_id': machine, + } + if password is not None: + params['power_pass'] = password + utils.create_node(macs, arch, 'virsh', params) + + conn.logout() + + +def power_control_virsh(poweraddr, machine, power_change, password=None): + """Powers controls a virtual machine using virsh.""" + + # Force password to None if blank, as the power control + # script will send a blank password if one is not set. 
+ if password == '': + password = None + + conn = VirshSSH() + if not conn.login(poweraddr, password): + raise VirshError('Failed to login to virsh console.') + + state = conn.get_state(machine) + if state is None: + raise VirshError('Failed to get domain: %s' % machine) + + if state == VirshVMState.OFF: + if power_change == 'on': + if conn.poweron(machine) is False: + raise VirshError('Failed to power on domain: %s' % machine) + elif state == VirshVMState.ON: + if power_change == 'off': + if conn.poweroff(machine) is False: + raise VirshError('Failed to power off domain: %s' % machine) + + +def power_state_virsh(poweraddr, machine, password=None): + """Return the power state for the virtual machine using virsh.""" + + # Force password to None if blank, as the power control + # script will send a blank password if one is not set. + if password == '': + password = None + + conn = VirshSSH() + if not conn.login(poweraddr, password): + raise VirshError('Failed to login to virsh console.') + + state = conn.get_state(machine) + if state is None: + raise VirshError('Failed to get domain: %s' % machine) + + try: + return VM_STATE_TO_POWER_STATE[state] + except KeyError: + raise VirshError('Unknown state: %s' % state) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/__init__.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/__init__.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/__init__.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/__init__.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,272 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Hardware Drivers.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "Architecture", + "ArchitectureRegistry", + "BootResource", + ] + +from abc import ( + ABCMeta, + abstractmethod, + ) + +from jsonschema import validate +from provisioningserver.power_schema import JSON_POWER_TYPE_PARAMETERS +from provisioningserver.utils.registry import Registry + +# JSON schema representing the Django choices format as JSON; an array of +# 2-item arrays. +CHOICE_FIELD_SCHEMA = { + 'type': 'array', + 'items': { + 'title': "Setting parameter field choice", + 'type': 'array', + 'minItems': 2, + 'maxItems': 2, + 'uniqueItems': True, + 'items': { + 'type': 'string', + } + }, +} + +# JSON schema for what a settings field should look like. +SETTING_PARAMETER_FIELD_SCHEMA = { + 'title': "Setting parameter field", + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + }, + 'field_type': { + 'type': 'string', + }, + 'label': { + 'type': 'string', + }, + 'required': { + 'type': 'boolean', + }, + 'choices': CHOICE_FIELD_SCHEMA, + 'default': { + 'type': 'string', + }, + }, + 'required': ['field_type', 'label', 'required'], +} + + +# JSON schema for what group of setting parameters should look like. +JSON_SETTING_SCHEMA = { + 'title': "Setting parameters set", + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + }, + 'description': { + 'type': 'string', + }, + 'fields': { + 'type': 'array', + 'items': SETTING_PARAMETER_FIELD_SCHEMA, + }, + }, + 'required': ['name', 'description', 'fields'], +} + + +def make_setting_field( + name, label, field_type=None, choices=None, default=None, + required=False): + """Helper function for building a JSON setting parameters field. + + :param name: The name of the field. + :type name: string + :param label: The label to be presented to the user for this field. 
+ :type label: string + :param field_type: The type of field to create. Can be one of + (string, choice, mac_address). Defaults to string. + :type field_type: string. + :param choices: The collection of choices to present to the user. + Needs to be structured as a list of lists, otherwise + make_setting_field() will raise a ValidationError. + :type list: + :param default: The default value for the field. + :type default: string + :param required: Whether or not a value for the field is required. + :type required: boolean + """ + if field_type not in ('string', 'mac_address', 'choice'): + field_type = 'string' + if choices is None: + choices = [] + validate(choices, CHOICE_FIELD_SCHEMA) + if default is None: + default = "" + field = { + 'name': name, + 'label': label, + 'required': required, + 'field_type': field_type, + 'choices': choices, + 'default': default, + } + return field + + +def validate_settings(setting_fields): + """Helper that validates that the fields adhere to the JSON schema.""" + validate(setting_fields, JSON_SETTING_SCHEMA) + + +class Architecture: + + def __init__(self, name, description, pxealiases=None, + kernel_options=None): + """Represents an architecture in the driver context. + + :param name: The architecture name as used in MAAS. + arch/subarch or just arch. + :param description: The human-readable description for the + architecture. + :param pxealiases: The optional list of names used if the + hardware uses a different name when requesting its bootloader. + :param kernel_options: The optional list of kernel options for this + architecture. Anything supplied here supplements the options + provided by MAAS core. + """ + if pxealiases is None: + pxealiases = () + self.name = name + self.description = description + self.pxealiases = pxealiases + self.kernel_options = kernel_options + + +class BootResource: + """Abstraction of ephemerals and pxe resources required for a hardware + driver. 
+ + This resource is responsible for importing and reporting on + what is potentially available in relation to a cluster controller. + """ + + __metaclass__ = ABCMeta + + def __init__(self, name): + self.name = name + + @abstractmethod + def import_resources(self, at_location, filter=None): + """Import the specified resources. + + :param at_location: URL to a Simplestreams index or a local path + to a directory containing boot resources. + :param filter: A simplestreams filter. + e.g. "release=trusty label=beta-2 arch=amd64" + This is ignored if the location is a local path, all resources + at the location will be imported. + TBD: How to provide progress information. + """ + + @abstractmethod + def describe_resources(self, at_location): + """Enumerate all the boot resources. + + :param at_location: URL to a Simplestreams index or a local path + to a directory containing boot resources. + + :return: a list of dictionaries describing the available resources, + which will need to be imported so the driver can use them. 
+ [ + { + "release": "trusty", + "arch": "amd64", + "label": "beta-2", + "size": 12344556, + } + , + ] + """ + + +class HardwareDiscoverContext: + + __metaclass__ = ABCMeta + + @abstractmethod + def startDiscovery(self): + """TBD""" + + @abstractmethod + def stopDiscovery(self): + """TBD""" + + +class ArchitectureRegistry(Registry): + """Registry for architecture classes.""" + + @classmethod + def get_by_pxealias(cls, alias): + for _, arch in cls: + if alias in arch.pxealiases: + return arch + return None + + +class BootResourceRegistry(Registry): + """Registry for boot resource classes.""" + + +class PowerTypeRegistry(Registry): + """Registry for power type classes.""" + + +builtin_architectures = [ + Architecture(name="i386/generic", description="i386"), + Architecture(name="amd64/generic", description="amd64"), + Architecture( + name="arm64/generic", description="arm64/generic", + pxealiases=["arm"]), + Architecture( + name="arm64/xgene-uboot", description="arm64/xgene-uboot", + pxealiases=["arm"]), + Architecture( + name="armhf/highbank", description="armhf/highbank", + pxealiases=["arm"], kernel_options=["console=ttyAMA0"]), + Architecture( + name="armhf/generic", description="armhf/generic", + pxealiases=["arm"], kernel_options=["console=ttyAMA0"]), + Architecture( + name="armhf/keystone", description="armhf/keystone", + pxealiases=["arm"]), + # PPC64EL needs a rootdelay for PowerNV. The disk controller + # in the hardware, takes a little bit longer to come up then + # the initrd wants to wait. Set this to 60 seconds, just to + # give the booting machine enough time. This doesn't slow down + # the booting process, it just increases the timeout. 
+ Architecture( + name="ppc64el/generic", description="ppc64el", + kernel_options=['rootdelay=60']), +] +for arch in builtin_architectures: + ArchitectureRegistry.register_item(arch.name, arch) + + +builtin_power_types = JSON_POWER_TYPE_PARAMETERS +for power_type in builtin_power_types: + PowerTypeRegistry.register_item(power_type['name'], power_type) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/centos.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/centos.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/centos.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/centos.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,67 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""CentOS Operating System.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "CentOS", + ] + +import re + +from provisioningserver.drivers.osystem import ( + BOOT_IMAGE_PURPOSE, + OperatingSystem, + ) + + +DISTRO_SERIES_DEFAULT = 'centos65' + +# Regex matcher that is used to check if the release is supported. +# It needs to match the name "centosXY". Where "X" is the major version +# and "Y" is the minor version. 
+DISTRO_MATCHER = re.compile("centos(?P[0-9])(?P[0-9])?\Z") + + +class CentOS(OperatingSystem): + """CentOS operating system.""" + + name = "centos" + title = "CentOS" + + def get_boot_image_purposes(self, arch, subarch, release, label): + """Gets the purpose of each boot image.""" + return [ + BOOT_IMAGE_PURPOSE.XINSTALL + ] + + def is_release_supported(self, release): + """Return True when the release is supported, False otherwise.""" + matched = DISTRO_MATCHER.match(release) + return matched is not None + + def get_default_release(self): + """Gets the default release to use when a release is not + explicit.""" + return DISTRO_SERIES_DEFAULT + + def get_release_title(self, release): + """Return the title for the given release.""" + matched = DISTRO_MATCHER.match(release) + if matched is None: + return None + matched_dict = matched.groupdict() + major = matched_dict['major'] + minor = matched_dict['minor'] + if minor is None: + minor = '0' + return "CentOS %s.%s" % (major, minor) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/custom.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/custom.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/custom.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/custom.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,64 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Operating System class used for custom images.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "CustomOS", + ] + +import os + +from provisioningserver.config import BOOT_RESOURCES_STORAGE +from provisioningserver.drivers.osystem import ( + BOOT_IMAGE_PURPOSE, + OperatingSystem, + ) + + +class CustomOS(OperatingSystem): + """Custom operating system.""" + + name = "custom" + title = "Custom" + + def get_boot_image_purposes(self, arch, subarch, release, label): + """Gets the purpose of each boot image.""" + # Custom images can only be used with XINSTALL. + return [BOOT_IMAGE_PURPOSE.XINSTALL] + + def is_release_supported(self, release): + """Return True when the release is supported, False otherwise.""" + # All release are supported, since the user uploaded it. + return True + + def get_default_release(self): + """Gets the default release to use when a release is not + explicit.""" + # No default for this OS. + return "" + + def get_release_title(self, release): + """Return the title for the given release.""" + # Return the same name, since the cluster does not know about the + # title of the image. The region will fix the title for the UI. 
+ return release + + def get_xinstall_parameters(self, arch, subarch, release, label): + """Returns the xinstall image name and type for given image.""" + path = os.path.join( + BOOT_RESOURCES_STORAGE, 'current', 'custom', + arch, subarch, release, label) + if os.path.exists(os.path.join(path, 'root-dd')): + return "root-dd", "dd-tgz" + else: + return "root-tgz", "tgz" diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/debian_networking.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/debian_networking.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/debian_networking.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/debian_networking.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,148 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Code to configure networking on Debian-like operating systems.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'compose_network_interfaces', + ] + +from textwrap import dedent + +from netaddr import IPAddress + + +def extract_ip_from_sequence(ips, ip_version): + """Return the first address for the given IP version from `ips`. + + :param ips: A sequence of IP address strings. + :param ip_version: Either 4 or 6 (for IPv4 or IPv6 respectively). Only an + address for this version will be returned. + :return: A matching IP address, or `None`. + """ + for ip in ips: + if IPAddress(ip).version == ip_version: + return ip + return None + + +def extract_ip(mapping, mac, ip_version): + """Extract IP address for `mac` and given IP version from `mapping`. + + :param mapping: A dict mapping MAC addresses to iterables of IP addresses, + each with at most one IPv4 and at most one IPv6 address. + :param mac: A MAC address. 
+ :param ip_version: Either 4 or 6 (for IPv4 or IPv6 respectively). Only an + address for this version will be returned. + :return: A matching IP address, or `None`. + """ + return extract_ip_from_sequence(mapping.get(mac, []), ip_version) + + +def compose_ipv4_stanza(interface, disable=False): + """Return a Debian `/etc/network/interfaces` stanza for DHCPv4. + + :param interface: Name of the network interface whose configuration should + be generated. + :param disable: If `True`, generate a stanza to disable the IPv4 address. + If `False` (the default), generate a DHCP stanza. + :return: Text of the interface's IPv4 address configuration stanza. + """ + if disable: + return dedent("""\ + # MAAS was configured to disable IPv4 networking on this node. + iface %s inet static + \tnetmask 255.255.255.255 + \taddress 0.0.0.0 + """.rstrip()) % interface + else: + return "iface %s inet dhcp" % interface + + +def compose_ipv6_stanza(interface, ip, gateway=None, nameserver=None, + netmask=64): + """Return a Debian `/etc/network/interfaces` stanza for IPv6. + + The stanza will configure a static address. + """ + lines = [ + 'iface %s inet6 static' % interface, + '\tnetmask %s' % netmask, + '\taddress %s' % ip, + ] + if gateway is not None: + lines.append('\tgateway %s' % gateway) + if nameserver is not None: + # Actually this keyword accepts up to 2 nameservers. + lines.append('\tdns-nameservers %s' % nameserver) + return '\n'.join(lines) + + +def has_static_ipv6_address(mapping): + """Does `mapping` contain an IPv6 address? + + :param mapping: A dict mapping MAC addresses to containers of IP addresses. + :return: Boolean: is any of the IP addresses and IPv6 address? 
+ """ + for ips in mapping.values(): + for ip in ips: + if IPAddress(ip).version == 6: + return True + return False + + +def compose_network_interfaces(interfaces, auto_interfaces, ips_mapping, + gateways_mapping, disable_ipv4=False, + nameservers=None, netmasks=None): + """Return contents for a node's `/etc/network/interfaces` file. + + :param interfaces: A list of interface/MAC pairs for the node. + :param auto_interfaces: A list of MAC addresses whose network interfaces + should come up automatically on node boot. + :param ips_mapping: A dict mapping MAC addresses to containers of the + corresponding network interfaces' IP addresses. + :param gateways_mapping: A `defaultdict` mapping MAC addresses to + containers of the corresponding network interfaces' default gateways. + :param disable_ipv4: Should this node be installed without IPv4 networking? + :param nameservers: Optional list of DNS servers. + :param netmasks: Optional dict mapping MAC IP addresses from `ips_mapping` + to their respective netmask strings. + """ + if nameservers is None: + ipv6_nameserver = None + else: + ipv6_nameserver = extract_ip_from_sequence(nameservers, 6) + if netmasks is None: + netmasks = {} + + # Should we disable IPv4 on this node? For safety's sake, we won't do this + # if the node has no static IPv6 addresses. Otherwise it might become + # accidentally unaddressable: it may have IPv6 addresses, but apart from + # being able to guess autoconfigured addresses, we won't know what they + # are. 
+ disable_ipv4 = (disable_ipv4 and has_static_ipv6_address(ips_mapping)) + stanzas = [ + 'auto lo', + ] + for interface, mac in interfaces: + if mac in auto_interfaces: + stanzas.append('auto %s' % interface) + stanzas.append(compose_ipv4_stanza(interface, disable=disable_ipv4)) + static_ipv6 = extract_ip(ips_mapping, mac, 6) + if static_ipv6 is not None: + gateway = extract_ip(gateways_mapping, mac, 6) + netmask = netmasks.get(static_ipv6, '64') + stanzas.append( + compose_ipv6_stanza( + interface, static_ipv6, gateway=gateway, + nameserver=ipv6_nameserver, netmask=netmask)) + return '%s\n' % '\n\n'.join(stanzas) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/__init__.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/__init__.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/__init__.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,259 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Osystem Drivers.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "Node", + "OperatingSystem", + "OperatingSystemRegistry", + "Token", + ] + +from abc import ( + ABCMeta, + abstractmethod, + abstractproperty, + ) +from collections import namedtuple + +from provisioningserver.utils.registry import Registry + + +class BOOT_IMAGE_PURPOSE: + """The vocabulary of a `BootImage`'s purpose.""" + #: Usable for commissioning + COMMISSIONING = 'commissioning' + #: Usable for install + INSTALL = 'install' + #: Usable for fast-path install + XINSTALL = 'xinstall' + #: Usable for diskless boot + DISKLESS = 'diskless' + + +# A cluster-side representation of a Node, relevant to the osystem code, +# with only minimal fields. 
+Node = namedtuple("Node", ("system_id", "hostname")) + + +# A cluster-side representation of a Token, relevant to the osystem code, +# with only minimal fields. +Token = namedtuple("Token", ("consumer_key", "token_key", "token_secret")) + + +def list_boot_images_for(osystem): + """List all boot images for the given osystem.""" + # Circular import + from provisioningserver.rpc.boot_images import list_boot_images + return [ + image + for image in list_boot_images() + if image['osystem'] == osystem.name + ] + + +class OperatingSystem: + """Skeleton for an operating system.""" + + __metaclass__ = ABCMeta + + @abstractproperty + def name(self): + """Name of the operating system.""" + + @abstractproperty + def title(self): + """Title of the operating system.""" + + @abstractmethod + def is_release_supported(self, release): + """Return True when the release is supported, False otherwise.""" + + @abstractmethod + def get_default_release(self): + """Return the default release to use when none is specified. + + :return: default release to use + """ + + @abstractmethod + def get_release_title(self, release): + """Return the given release's title. + + :type release: unicode + :return: unicode + """ + + @abstractmethod + def get_boot_image_purposes(self, arch, subarch, release, label): + """Return a boot image's supported purposes. + + :param arch: Architecture of boot image. + :param subarch: Sub-architecture of boot image. + :param release: Release of boot image. + :param label: Label of boot image. + :return: list of supported purposes + """ + + def format_release_choices(self, releases): + """Format the release choices that are presented to the user. 
+ + :param releases: list of installed boot image releases + :return: Return Django "choices" list + """ + choices = [] + releases = sorted(releases, reverse=True) + for release in releases: + title = self.get_release_title(release) + if title is not None: + choices.append((release, title)) + return choices + + def gen_supported_releases(self): + """List operating system's supported releases. + + This is based off the boot images that the cluster currently has + for this operating system. + """ + for image in list_boot_images_for(self): + release = image['release'] + if self.is_release_supported(release): + yield release + + def get_supported_releases(self): + """Return operating system's supported releases as a set. + + This is based off the boot images that the cluster currently has + for this operating system. + + :return: set of supported releases + """ + return set(self.gen_supported_releases()) + + def get_supported_commissioning_releases(self): + """List operating system's supported commissioning releases. + + Typically this will only return something for Ubuntu, because + that is the only operating system on which we commission. + + :return: list of releases. + """ + return [] + + def get_default_commissioning_release(self): + """Return operating system's default commissioning release. + + Typically this will only return something for Ubuntu, because + that is the only operating system on which we commission. + + :return: a release name, or ``None``. + """ + return None + + def requires_license_key(self, release): + """Return whether the given release requires a license key. + + :param release: Release + :return: True if requires license key, false otherwise. + """ + return False + + def validate_license_key(self, release, key): + """Validate a license key for a release. + + This is only called if the release requires a license key. 
+ + :param release: Release + :param key: License key + :return: True if valid, false otherwise + """ + raise NotImplementedError() + + def compose_preseed(self, preseed_type, node, token, metadata_url): + """Compose preseed for the given node. + + :param preseed_type: Preseed type to compose. + :param node: Node preseed needs generating for. + :type node: :py:class:`Node` + :param token: OAuth token for URL. + :type token: :py:class:`Token` + :param metadata_url: Metadata URL for node. + :return: Preseed data for node. + :raise: + NotImplementedError: doesn't implement a custom preseed + """ + raise NotImplementedError() + + def compose_curtin_network_preseed(self, interfaces, auto_interfaces, + ips_mapping, gateways_mapping, + disable_ipv4=False, nameservers=None, + netmasks=None): + """Compose a Curtin preseed to configure a node's networking. + + :param interfaces: A list of tuples, each a pair of an interface name + and a MAC address. If supported, the resulting preseed will assign + these interface names to these MAC addresses. + :param auto_interfaces: A list of MAC addresses for the network + interfaces that should come up automatically on node boot. + :param ips_mapping: A dict mapping MAC addresses to iterables of the + corresponding network interfaces' IP addresses (up to one each for + IPv4 and IPv6). If supported, the resulting preseed will configure + the network interface corresponding to each MAC to have the given + IP addresses. By default, DHCP is available for IPv4 and will + provide the same addresses, so the caller may choose not to + configure those in this way. + :param gateways_mapping: A dict mapping MAC addresses to iterables of + default gateways (up to one each for IPv4 and IPv6). If supported, + the resulting preseed will configure the network interface + corresponding to each MAC to use the given default gateways. + :param disable_ipv4: Should this node be installed without IPv4 + networking? 
+ :param nameservers: Optional list of DNS servers. + :param netmasks: Optional dict mapping IP addresses from `ips_mapping` + to their respective netmask strings. + :return: A list of dicts that can be JSON-encoded and submitted to + Curtin as preseeds, perhaps in combination with other preseeds. + """ + return [] + + def get_xinstall_parameters(self, arch, subarch, release, label): + """Return the xinstall image name and type for this operating system. + + :param arch: Architecture of boot image. + :param subarch: Sub-architecture of boot image. + :param release: Release of boot image. + :param label: Label of boot image. + :return: tuple with name of root image and image type + """ + return "root-tgz", "tgz" + + +class OperatingSystemRegistry(Registry): + """Registry for operating system classes.""" + + +from provisioningserver.drivers.osystem.ubuntu import UbuntuOS +from provisioningserver.drivers.osystem.centos import CentOS +from provisioningserver.drivers.osystem.custom import CustomOS +from provisioningserver.drivers.osystem.windows import WindowsOS +from provisioningserver.drivers.osystem.suse import SUSEOS + +builtin_osystems = [ + UbuntuOS(), + CentOS(), + CustomOS(), + WindowsOS(), + SUSEOS(), + ] +for osystem in builtin_osystems: + OperatingSystemRegistry.register_item(osystem.name, osystem) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/suse.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/suse.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/suse.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/suse.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,56 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""SUSE Operating System.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "SUSEOS", + ] + +from provisioningserver.drivers.osystem import ( + BOOT_IMAGE_PURPOSE, + OperatingSystem, + ) + + +DISTRO_SERIES_CHOICES = { + 'opensuse13': 'openSUSE 13.1', +} + +DISTRO_SERIES_DEFAULT = 'opensuse13' +assert DISTRO_SERIES_DEFAULT in DISTRO_SERIES_CHOICES + + +class SUSEOS(OperatingSystem): + """SUSE operating system.""" + + name = "suse" + title = "SUSE" + + def get_boot_image_purposes(self, arch, subarch, release, label): + """Gets the purpose of each boot image.""" + return [ + BOOT_IMAGE_PURPOSE.XINSTALL + ] + + def is_release_supported(self, release): + """Return True when the release is supported, False otherwise.""" + return release in DISTRO_SERIES_CHOICES + + def get_default_release(self): + """Gets the default release to use when a release is not + explicit.""" + return DISTRO_SERIES_DEFAULT + + def get_release_title(self, release): + """Return the title for the given release.""" + return DISTRO_SERIES_CHOICES.get(release) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/tests/test_base.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/tests/test_base.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/tests/test_base.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/tests/test_base.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,87 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for `provisioningserver.drivers.osystem`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from mock import sentinel +from provisioningserver.drivers import osystem as osystem_module +from provisioningserver.drivers.osystem import ( + BOOT_IMAGE_PURPOSE, + OperatingSystemRegistry, + ) +from provisioningserver.testing.os import make_osystem +from provisioningserver.utils.testing import RegistryFixture + + +class TestOperatingSystem(MAASTestCase): + + def make_usable_osystem(self): + return make_osystem(self, factory.make_name('os'), [ + BOOT_IMAGE_PURPOSE.COMMISSIONING, + BOOT_IMAGE_PURPOSE.INSTALL, + BOOT_IMAGE_PURPOSE.XINSTALL, + ]) + + def make_boot_image_for(self, osystem, release): + return dict( + osystem=osystem, + release=release, + ) + + def configure_list_boot_images_for(self, osystem): + images = [ + self.make_boot_image_for(osystem.name, release) + for release in osystem.get_supported_releases() + ] + self.patch_autospec( + osystem_module, 'list_boot_images_for').return_value = images + return images + + def test_format_release_choices(self): + osystem = self.make_usable_osystem() + releases = osystem.get_supported_releases() + self.assertItemsEqual( + [(release, release) for release in releases], + osystem.format_release_choices(releases)) + + def test_format_release_choices_sorts(self): + osystem = self.make_usable_osystem() + releases = osystem.get_supported_releases() + self.assertEqual( + [(release, release) for release in sorted(releases, reverse=True)], + osystem.format_release_choices(releases)) + + def test_gen_supported_releases(self): + osystem = self.make_usable_osystem() + images = self.configure_list_boot_images_for(osystem) + releases = {image['release'] for image in images} + self.assertItemsEqual( + releases, osystem.gen_supported_releases()) 
+ + +class TestOperatingSystemRegistry(MAASTestCase): + + def setUp(self): + super(TestOperatingSystemRegistry, self).setUp() + # Ensure the global registry is empty for each test run. + self.useFixture(RegistryFixture()) + + def test_operating_system_registry(self): + self.assertItemsEqual([], OperatingSystemRegistry) + OperatingSystemRegistry.register_item("resource", sentinel.resource) + self.assertIn( + sentinel.resource, + (item for name, item in OperatingSystemRegistry)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/tests/test_centos.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/tests/test_centos.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/tests/test_centos.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/tests/test_centos.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,79 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for the CentOS module.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from itertools import product + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from provisioningserver.drivers.osystem.centos import ( + BOOT_IMAGE_PURPOSE, + CentOS, + DISTRO_SERIES_DEFAULT, + ) +from testtools.matchers import Equals + + +class TestCentOS(MAASTestCase): + + def test_get_boot_image_purposes(self): + osystem = CentOS() + archs = [factory.make_name('arch') for _ in range(2)] + subarchs = [factory.make_name('subarch') for _ in range(2)] + releases = [factory.make_name('release') for _ in range(2)] + labels = [factory.make_name('label') for _ in range(2)] + for arch, subarch, release, label in product( + archs, subarchs, releases, labels): + expected = osystem.get_boot_image_purposes( + arch, subarchs, release, label) + self.assertIsInstance(expected, list) + self.assertEqual(expected, [ + BOOT_IMAGE_PURPOSE.XINSTALL, + ]) + + def test_is_release_supported(self): + name_supported = { + "centos6": True, + "centos65": True, + "centos7": True, + "centos71": True, + "cent65": False, + "cent": False, + "centos711": False, + } + osystem = CentOS() + for name, supported in name_supported.items(): + self.expectThat( + osystem.is_release_supported(name), Equals(supported)) + + def test_get_default_release(self): + osystem = CentOS() + expected = osystem.get_default_release() + self.assertEqual(expected, DISTRO_SERIES_DEFAULT) + + def test_get_release_title(self): + name_titles = { + "centos6": "CentOS 6.0", + "centos65": "CentOS 6.5", + "centos7": "CentOS 7.0", + "centos71": "CentOS 7.1", + "cent65": None, + "cent": None, + "centos711": None, + } + osystem = CentOS() + for name, title in name_titles.items(): + self.expectThat( + osystem.get_release_title(name), Equals(title)) diff -Nru 
maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/tests/test_custom.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/tests/test_custom.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/tests/test_custom.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/tests/test_custom.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,89 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for the CustomOS module.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from itertools import product +import os + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from provisioningserver.drivers.osystem import custom +from provisioningserver.drivers.osystem.custom import ( + BOOT_IMAGE_PURPOSE, + CustomOS, + ) + + +class TestCustomOS(MAASTestCase): + + def make_resource_path(self, filename): + tmpdir = self.make_dir() + arch = factory.make_name('arch') + subarch = factory.make_name('subarch') + release = factory.make_name('release') + label = factory.make_name('label') + dirpath = os.path.join( + tmpdir, 'current', 'custom', arch, subarch, release, label) + os.makedirs(dirpath) + factory.make_file(dirpath, filename) + self.patch(custom, 'BOOT_RESOURCES_STORAGE', tmpdir) + return arch, subarch, release, label + + def test_get_boot_image_purposes(self): + osystem = CustomOS() + archs = [factory.make_name('arch') for _ in range(2)] + subarchs = [factory.make_name('subarch') for _ in range(2)] + releases = [factory.make_name('release') for _ in range(2)] + labels = [factory.make_name('label') for _ in range(2)] + for arch, subarch, release, label in product( + archs, subarchs, releases, labels): + expected = osystem.get_boot_image_purposes( + arch, subarchs, release, label) + 
self.assertIsInstance(expected, list) + self.assertEqual(expected, [ + BOOT_IMAGE_PURPOSE.XINSTALL, + ]) + + def test_is_release_supported(self): + osystem = CustomOS() + releases = [factory.make_name('release') for _ in range(3)] + supported = [ + osystem.is_release_supported(release) + for release in releases + ] + self.assertEqual([True, True, True], supported) + + def test_get_default_release(self): + osystem = CustomOS() + self.assertEqual("", osystem.get_default_release()) + + def test_get_release_title(self): + osystem = CustomOS() + release = factory.make_name('release') + self.assertEqual(release, osystem.get_release_title(release)) + + def test_get_xinstall_parameters_returns_root_tgz_tgz(self): + osystem = CustomOS() + arch, subarch, release, label = self.make_resource_path('root-tgz') + self.assertItemsEqual( + ('root-tgz', 'tgz'), + osystem.get_xinstall_parameters(arch, subarch, release, label)) + + def test_get_xinstall_parameters_returns_root_dd_dd_tgz(self): + osystem = CustomOS() + arch, subarch, release, label = self.make_resource_path('root-dd') + self.assertItemsEqual( + ('root-dd', 'dd-tgz'), + osystem.get_xinstall_parameters(arch, subarch, release, label)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/tests/test_debian_networking.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/tests/test_debian_networking.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/tests/test_debian_networking.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/tests/test_debian_networking.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,265 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for Debian `/etc/network/interfaces` generation.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from random import randint +from textwrap import dedent + +from maastesting.factory import factory +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import MAASTestCase +from mock import ANY +from provisioningserver.drivers.osystem import debian_networking +from provisioningserver.drivers.osystem.debian_networking import ( + compose_ipv4_stanza, + compose_ipv6_stanza, + compose_network_interfaces, + has_static_ipv6_address, + ) +from testtools.matchers import Contains + + +class TestComposeIPv4Stanza(MAASTestCase): + + def test__produces_dhcp_stanza_by_default(self): + interface = factory.make_name('eth') + self.assertEqual( + "iface %s inet dhcp" % interface, + compose_ipv4_stanza(interface).strip()) + + def test__produces_static_nil_address_if_disabled(self): + interface = factory.make_name('eth') + stanza = compose_ipv4_stanza(interface, disable=True) + self.expectThat( + stanza, + Contains("iface %s inet static\n" % interface)) + self.expectThat( + stanza + '\n', + Contains("address 0.0.0.0\n")) + + +class TestComposeIPv6Stanza(MAASTestCase): + + def test__produces_static_stanza(self): + ip = factory.make_ipv6_address() + netmask = randint(64, 127) + interface = factory.make_name('eth') + expected = dedent("""\ + iface %s inet6 static + \tnetmask %d + \taddress %s + """) % (interface, netmask, ip) + self.assertEqual( + expected.strip(), + compose_ipv6_stanza(interface, ip, netmask=netmask).strip()) + + def test__netmask_defaults_to_64(self): + ip = factory.make_ipv6_address() + interface = factory.make_name('eth') + self.assertIn('netmask 64', compose_ipv6_stanza(interface, ip)) + + def test__netmask_accepts_address_style_netmask_string(self): + ip = factory.make_ipv6_address() + netmask = 
'ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffc' + interface = factory.make_name('eth') + self.assertIn( + 'netmask %s' % netmask, + compose_ipv6_stanza(interface, ip, netmask=netmask)) + + def test__includes_gateway_if_given(self): + ip = factory.make_ipv6_address() + interface = factory.make_name('eth') + gateway = factory.make_ipv6_address() + expected = dedent("""\ + iface %s inet6 static + \tnetmask 64 + \taddress %s + \tgateway %s + """) % (interface, ip, gateway) + self.assertEqual( + expected.strip(), + compose_ipv6_stanza(interface, ip, gateway=gateway).strip()) + + def test__adds_nameserver_if_given(self): + ip = factory.make_ipv6_address() + interface = factory.make_name('eth') + nameserver = factory.make_ipv6_address() + expected = dedent("""\ + iface %s inet6 static + \tnetmask 64 + \taddress %s + \tdns-nameservers %s + """) % (interface, ip, nameserver) + self.assertEqual( + expected.strip(), + compose_ipv6_stanza(interface, ip, nameserver=nameserver).strip()) + + +class TestHasStaticIPv6Address(MAASTestCase): + + def test__returns_False_for_empty_mapping(self): + self.assertFalse(has_static_ipv6_address({})) + + def test__finds_IPv6_address(self): + self.assertTrue( + has_static_ipv6_address( + {factory.make_mac_address(): {factory.make_ipv6_address()}})) + + def test__ignores_IPv4_address(self): + self.assertFalse( + has_static_ipv6_address( + {factory.make_mac_address(): {factory.make_ipv4_address()}})) + + def test__finds_IPv6_address_among_IPv4_addresses(self): + mapping = { + factory.make_mac_address(): {factory.make_ipv4_address()}, + factory.make_mac_address(): { + factory.make_ipv4_address(), + factory.make_ipv6_address(), + factory.make_ipv4_address(), + }, + factory.make_mac_address(): {factory.make_ipv4_address()}, + } + self.assertTrue(has_static_ipv6_address(mapping)) + + +class TestComposeNetworkInterfaces(MAASTestCase): + + def make_listing(self, interface=None, mac=None): + """Return a list containing an interface/MAC tuple.""" + if 
interface is None: + interface = factory.make_name('eth') + if mac is None: + mac = factory.make_mac_address() + return [(interface, mac)] + + def make_mapping(self, mac=None, ips=None): + """Create a MAC-to-IPs `defaultdict` like `map_static_ips` returns. + + The mapping will map `mac` (random by default) to `ips` (containing + one IPv6 address by default). + """ + if mac is None: + mac = factory.make_mac_address() + if ips is None: + ips = {factory.make_ipv6_address()} + return {mac: ips} + + def test__always_generates_lo(self): + self.assertIn('auto lo', compose_network_interfaces([], [], {}, {})) + + def test__generates_DHCPv4_config_if_IPv4_not_disabled(self): + interface = factory.make_name('eth') + mac = factory.make_mac_address() + self.assertIn( + "\niface %s inet dhcp\n" % interface, + compose_network_interfaces( + self.make_listing(interface, mac), [], {}, {})) + + def test__generates_DHCPv4_config_if_no_IPv6_configured(self): + interface = factory.make_name('eth') + mac = factory.make_mac_address() + self.assertIn( + "\niface %s inet dhcp\n" % interface, + compose_network_interfaces( + self.make_listing(interface, mac), [], {}, {}, + disable_ipv4=True)) + + def test__disables_IPv4_statically_if_IPv4_disabled(self): + interface = factory.make_name('eth') + mac = factory.make_mac_address() + self.assertIn( + "\niface %s inet static" % interface, + compose_network_interfaces( + self.make_listing(interface, mac), [], self.make_mapping(mac), + {}, disable_ipv4=True)) + + def test__generates_static_IPv6_config(self): + interface = factory.make_name('eth') + mac = factory.make_mac_address() + ipv6 = factory.make_ipv6_address() + disable_ipv4 = factory.pick_bool() + self.assertIn( + "\niface %s inet6 static" % interface, + compose_network_interfaces( + self.make_listing(interface, mac), [], + self.make_mapping(mac, {ipv6}), {}, disable_ipv4=disable_ipv4)) + + def test__passes_subnet_details_when_creating_IPv6_stanza(self): + interface = factory.make_name('eth') 
+ mac = factory.make_mac_address() + ipv6 = factory.make_ipv6_address() + gateway = factory.make_ipv6_address() + nameserver = factory.make_ipv6_address() + netmask = '%s' % randint(16, 127) + fake = self.patch_autospec(debian_networking, 'compose_ipv6_stanza') + fake.return_value = factory.make_name('stanza') + + compose_network_interfaces( + self.make_listing(interface, mac), [], + self.make_mapping(mac, {ipv6}), self.make_mapping(mac, {gateway}), + nameservers=[nameserver], netmasks={ipv6: netmask}) + + self.assertThat( + fake, MockCalledOnceWith( + interface, ipv6, gateway=gateway, nameserver=nameserver, + netmask=netmask)) + + def test__ignores_IPv4_nameserver_when_creating_IPv6_stanza(self): + interface = factory.make_name('eth') + mac = factory.make_mac_address() + ipv6 = factory.make_ipv6_address() + nameserver = factory.make_ipv4_address() + fake = self.patch_autospec(debian_networking, 'compose_ipv6_stanza') + fake.return_value = factory.make_name('stanza') + + compose_network_interfaces( + self.make_listing(interface, mac), [], + self.make_mapping(mac, {ipv6}), gateways_mapping={}, + nameservers=[nameserver]) + + self.assertThat( + fake, MockCalledOnceWith( + interface, ANY, gateway=ANY, nameserver=None, netmask=ANY)) + + def test__omits_gateway_and_nameserver_if_not_set(self): + interface = factory.make_name('eth') + mac = factory.make_mac_address() + fake = self.patch_autospec(debian_networking, 'compose_ipv6_stanza') + fake.return_value = factory.make_name('stanza') + + compose_network_interfaces( + self.make_listing(interface, mac), [], self.make_mapping(mac), {}) + + self.assertThat( + fake, + MockCalledOnceWith( + interface, ANY, gateway=None, nameserver=None, netmask=ANY)) + + def test__writes_auto_lines_for_interfaces_in_auto_interfaces(self): + interface = factory.make_name('eth') + mac = factory.make_mac_address() + + interfaces_file = compose_network_interfaces( + self.make_listing(interface, mac), [mac], {}, {}) + + self.assertIn('auto %s' % 
interface, interfaces_file) + self.assertEqual(1, interfaces_file.count('auto %s' % interface)) + + def test__omits_auto_lines_for_interfaces_not_in_auto_interfaces(self): + interface = factory.make_name('eth') + interfaces_file = compose_network_interfaces( + self.make_listing(interface), [factory.make_mac_address()], {}, {}) + self.assertNotIn('auto %s' % interface, interfaces_file) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/tests/test_suse.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/tests/test_suse.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/tests/test_suse.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/tests/test_suse.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,57 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for the SUSEOS module.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from itertools import product +import random + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from provisioningserver.drivers.osystem.suse import ( + BOOT_IMAGE_PURPOSE, + DISTRO_SERIES_CHOICES, + DISTRO_SERIES_DEFAULT, + SUSEOS, + ) + + +class TestSUSEOS(MAASTestCase): + + def test_get_boot_image_purposes(self): + osystem = SUSEOS() + archs = [factory.make_name('arch') for _ in range(2)] + subarchs = [factory.make_name('subarch') for _ in range(2)] + releases = [factory.make_name('release') for _ in range(2)] + labels = [factory.make_name('label') for _ in range(2)] + for arch, subarch, release, label in product( + archs, subarchs, releases, labels): + expected = osystem.get_boot_image_purposes( + arch, subarchs, release, label) + self.assertIsInstance(expected, list) + self.assertEqual(expected, [ + 
BOOT_IMAGE_PURPOSE.XINSTALL, + ]) + + def test_get_default_release(self): + osystem = SUSEOS() + expected = osystem.get_default_release() + self.assertEqual(expected, DISTRO_SERIES_DEFAULT) + + def test_get_release_title(self): + osystem = SUSEOS() + release = random.choice(DISTRO_SERIES_CHOICES.keys()) + self.assertEqual( + DISTRO_SERIES_CHOICES[release], + osystem.get_release_title(release)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/tests/test_ubuntu.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/tests/test_ubuntu.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/tests/test_ubuntu.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/tests/test_ubuntu.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,152 @@ +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for the UbuntuOS module.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from itertools import product +import random + +from distro_info import UbuntuDistroInfo +from maastesting.factory import factory +from maastesting.matchers import MockAnyCall +from maastesting.testcase import MAASTestCase +from provisioningserver.drivers.osystem import BOOT_IMAGE_PURPOSE +from provisioningserver.drivers.osystem.debian_networking import ( + compose_network_interfaces, + ) +from provisioningserver.drivers.osystem.ubuntu import UbuntuOS +import provisioningserver.drivers.osystem.ubuntu as ubuntu_module +from provisioningserver.udev import compose_network_interfaces_udev_rules +from provisioningserver.utils.curtin import compose_recursive_copy +from testtools.matchers import ( + AllMatch, + HasLength, + IsInstance, + ) + + +class TestUbuntuOS(MAASTestCase): + + def get_lts_release(self): + return UbuntuDistroInfo().lts() + 
+ def get_release_title(self, release): + info = UbuntuDistroInfo() + for row in info._avail(info._date): + if row['series'] == release: + return info._format("fullname", row) + return None + + def test_get_boot_image_purposes(self): + osystem = UbuntuOS() + archs = [factory.make_name('arch') for _ in range(2)] + subarchs = [factory.make_name('subarch') for _ in range(2)] + releases = [factory.make_name('release') for _ in range(2)] + labels = [factory.make_name('label') for _ in range(2)] + for arch, subarch, release, label in product( + archs, subarchs, releases, labels): + expected = osystem.get_boot_image_purposes( + arch, subarchs, release, label) + self.assertIsInstance(expected, list) + self.assertEqual(expected, [ + BOOT_IMAGE_PURPOSE.COMMISSIONING, + BOOT_IMAGE_PURPOSE.INSTALL, + BOOT_IMAGE_PURPOSE.XINSTALL, + BOOT_IMAGE_PURPOSE.DISKLESS, + ]) + + def test_get_default_release(self): + osystem = UbuntuOS() + expected = osystem.get_default_release() + self.assertEqual(expected, self.get_lts_release()) + + def test_get_supported_commissioning_releases(self): + osystem = UbuntuOS() + expected = osystem.get_supported_commissioning_releases() + self.assertIsInstance(expected, list) + self.assertEqual(expected, [self.get_lts_release()]) + + def test_default_commissioning_release(self): + osystem = UbuntuOS() + expected = osystem.get_default_commissioning_release() + self.assertEqual(expected, self.get_lts_release()) + + def test_get_release_title(self): + osystem = UbuntuOS() + info = UbuntuDistroInfo() + release = random.choice(info.all) + self.assertEqual( + osystem.get_release_title(release), + self.get_release_title(release)) + + +class TestComposeCurtinNetworkPreseed(MAASTestCase): + + def find_preseed(self, preseeds, key): + """Extract from list of `preseeds` the first one containing `key`.""" + for preseed in preseeds: + if key in preseed: + return preseed + return None + + def test__returns_list_of_dicts(self): + preseed = 
UbuntuOS().compose_curtin_network_preseed([], [], {}, {}) + self.assertIsInstance(preseed, list) + self.assertThat(preseed, HasLength(2)) + [write_files, late_commands] = preseed + + self.assertIsInstance(write_files, dict) + self.assertIn('write_files', write_files) + self.assertIsInstance(write_files['write_files'], dict) + self.assertThat( + write_files['write_files'].values(), + AllMatch(IsInstance(dict))) + + self.assertIsInstance(late_commands, dict) + self.assertIn('late_commands', late_commands) + self.assertIsInstance(late_commands['late_commands'], dict) + self.assertThat( + late_commands['late_commands'].values(), + AllMatch(IsInstance(list))) + + def test__writes_network_interfaces_file(self): + interfaces_file = compose_network_interfaces([], [], {}, {}) + write_text_file = self.patch_autospec( + ubuntu_module, 'compose_write_text_file') + + UbuntuOS().compose_curtin_network_preseed([], [], {}, {}) + + temp_path = '/tmp/maas/etc/network/interfaces' + self.expectThat( + write_text_file, + MockAnyCall(temp_path, interfaces_file, permissions=0644)) + + def test__writes_udev_rules_file(self): + udev_file = compose_network_interfaces_udev_rules([]) + write_text_file = self.patch_autospec( + ubuntu_module, 'compose_write_text_file') + + UbuntuOS().compose_curtin_network_preseed([], [], {}, {}) + + temp_path = '/tmp/maas/etc/udev/rules.d/70-persistent-net.rules' + self.expectThat( + write_text_file, + MockAnyCall(temp_path, udev_file, permissions=0644)) + + def test__copies_temp_etc_to_real_etc(self): + preseed = UbuntuOS().compose_curtin_network_preseed([], [], {}, {}) + late_commands = self.find_preseed(preseed, 'late_commands') + self.assertEqual( + {'copy_etc': compose_recursive_copy('/tmp/maas/etc', '/')}, + late_commands['late_commands']) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/tests/test_windows.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/tests/test_windows.py --- 
maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/tests/test_windows.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/tests/test_windows.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,224 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for the WindowsOS module.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import os +import random + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from provisioningserver.drivers.osystem import ( + Node, + Token, + ) +from provisioningserver.drivers.osystem.windows import ( + BOOT_IMAGE_PURPOSE, + Config, + REQUIRE_LICENSE_KEY, + WINDOWS_CHOICES, + WINDOWS_DEFAULT, + WindowsOS, + ) + + +class TestWindowsOS(MAASTestCase): + + def make_resource_path(self, files=[]): + tmpdir = self.make_dir() + arch = factory.make_name('arch') + subarch = factory.make_name('subarch') + release = factory.make_name('release') + label = factory.make_name('label') + dirpath = os.path.join( + tmpdir, 'windows', arch, subarch, release, label) + os.makedirs(dirpath) + for fname in files: + factory.make_file(dirpath, fname) + self.patch(Config, 'load_from_cache').return_value = { + 'tftp': { + 'resource_root': tmpdir, + }, + } + return arch, subarch, release, label + + def test_get_boot_image_purposes_neither(self): + osystem = WindowsOS() + arch, subarch, release, label = self.make_resource_path() + self.assertItemsEqual( + [], + osystem.get_boot_image_purposes(arch, subarch, release, label)) + + def test_get_boot_image_purposes_both(self): + osystem = WindowsOS() + arch, subarch, release, label = self.make_resource_path( + files=['root-dd', 'pxeboot.0']) + self.assertItemsEqual( + [BOOT_IMAGE_PURPOSE.XINSTALL, BOOT_IMAGE_PURPOSE.INSTALL], + 
osystem.get_boot_image_purposes(arch, subarch, release, label)) + + def test_get_boot_image_purposes_xinstall_only(self): + osystem = WindowsOS() + arch, subarch, release, label = self.make_resource_path( + files=['root-dd']) + self.assertItemsEqual( + [BOOT_IMAGE_PURPOSE.XINSTALL], + osystem.get_boot_image_purposes(arch, subarch, release, label)) + + def test_get_boot_image_purposes_install_only(self): + osystem = WindowsOS() + arch, subarch, release, label = self.make_resource_path( + files=['pxeboot.0']) + self.assertItemsEqual( + [BOOT_IMAGE_PURPOSE.INSTALL], + osystem.get_boot_image_purposes(arch, subarch, release, label)) + + def test_get_default_release(self): + osystem = WindowsOS() + expected = osystem.get_default_release() + self.assertEqual(expected, WINDOWS_DEFAULT) + + def test_get_release_title(self): + osystem = WindowsOS() + release = random.choice(WINDOWS_CHOICES.keys()) + self.assertEqual( + WINDOWS_CHOICES[release], + osystem.get_release_title(release)) + + def test_requires_license_key_True(self): + osystem = WindowsOS() + for release in REQUIRE_LICENSE_KEY: + self.assertTrue(osystem.requires_license_key(release)) + + def test_requires_license_key_False(self): + osystem = WindowsOS() + not_required = set( + WINDOWS_CHOICES.keys()).difference(REQUIRE_LICENSE_KEY) + for release in not_required: + self.assertFalse(osystem.requires_license_key(release)) + + def test_validate_license_key(self): + osystem = WindowsOS() + parts = [factory.make_string(size=5) for _ in range(5)] + key = '-'.join(parts) + self.assertTrue( + osystem.validate_license_key(REQUIRE_LICENSE_KEY[0], key)) + + def test_validate_license_key_invalid(self): + osystem = WindowsOS() + keys = [factory.make_string() for _ in range(3)] + for key in keys: + self.assertFalse( + osystem.validate_license_key(REQUIRE_LICENSE_KEY[0], key)) + + def make_node(self, hostname=None): + if hostname is None: + machine = factory.make_name('hostname') + dns = factory.make_name('dns') + hostname = 
'%s.%s' % (machine, dns)
+        return Node(
+            system_id=factory.make_name("system_id"),
+            hostname=hostname,
+            )
+
+    def make_token(self, consumer_key=None, token_key=None, token_secret=None):
+        if consumer_key is None:
+            consumer_key = factory.make_name('consumer_key')
+        if token_key is None:
+            token_key = factory.make_name('token_key')
+        if token_secret is None:
+            token_secret = factory.make_name('secret_key')
+        return Token(
+            consumer_key=consumer_key,
+            token_key=token_key,
+            token_secret=token_secret,
+            )
+
+    def test_compose_preseed_not_implemented_for_curtin(self):
+        osystem = WindowsOS()
+        node = self.make_node()
+        token = self.make_token()
+        url = factory.make_name('url')
+        self.assertRaises(
+            NotImplementedError,
+            osystem.compose_preseed, 'curtin', node, token, url)
+
+    def test_compose_preseed_has_required_keys(self):
+        osystem = WindowsOS()
+        node = self.make_node()
+        token = self.make_token()
+        url = factory.make_name('url')
+        required_keys = [
+            'maas_metadata_url',
+            'maas_oauth_consumer_secret',
+            'maas_oauth_consumer_key',
+            'maas_oauth_token_key',
+            'maas_oauth_token_secret',
+            'hostname',
+            ]
+        preseed = osystem.compose_preseed('default', node, token, url)
+        self.assertItemsEqual(required_keys, preseed.keys())
+
+    def test_compose_preseed_uses_only_hostname(self):
+        osystem = WindowsOS()
+        machine = factory.make_name('hostname')
+        dns = factory.make_name('dns')
+        hostname = '%s.%s' % (machine, dns)
+        node = self.make_node(hostname=hostname)
+        token = self.make_token()
+        url = factory.make_name('url')
+        preseed = osystem.compose_preseed('default', node, token, url)
+        self.assertEqual(machine, preseed['hostname'])
+
+    def test_compose_preseed_truncates_hostname(self):
+        osystem = WindowsOS()
+        machine = factory.make_name('hostname', size=20)
+        dns = factory.make_name('dns')
+        hostname = '%s.%s' % (machine, dns)
+        node = self.make_node(hostname=hostname)
+        token = self.make_token()
+        url = factory.make_name('url')
+        preseed = 
osystem.compose_preseed('default', node, token, url) + self.assertEqual(15, len(preseed['hostname'])) + + def test_compose_preseed_includes_oauth(self): + osystem = WindowsOS() + node = self.make_node() + consumer_key = factory.make_name('consumer_key') + token_key = factory.make_name('token_key') + token_secret = factory.make_name('secret_key') + token = self.make_token( + consumer_key=consumer_key, token_key=token_key, + token_secret=token_secret) + url = factory.make_name('url') + preseed = osystem.compose_preseed('default', node, token, url) + self.assertEqual('', preseed['maas_oauth_consumer_secret']) + self.assertEqual(consumer_key, preseed['maas_oauth_consumer_key']) + self.assertEqual(token_key, preseed['maas_oauth_token_key']) + self.assertEqual(token_secret, preseed['maas_oauth_token_secret']) + + def test_compose_preseed_includes_metadata_url(self): + osystem = WindowsOS() + node = self.make_node() + token = self.make_token() + url = factory.make_name('url') + preseed = osystem.compose_preseed('default', node, token, url) + self.assertEqual(url, preseed['maas_metadata_url']) + + def test_get_xinstall_parameters(self): + osystem = WindowsOS() + image, image_type = osystem.get_xinstall_parameters( + None, None, None, None) + self.assertEqual('root-dd', image) + self.assertEqual('dd-tgz', image_type) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/ubuntu.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/ubuntu.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/ubuntu.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/ubuntu.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,125 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Ubuntu Operating System.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "UbuntuOS", + ] + +from distro_info import UbuntuDistroInfo +from provisioningserver.drivers.osystem import ( + BOOT_IMAGE_PURPOSE, + OperatingSystem, + ) +from provisioningserver.drivers.osystem.debian_networking import ( + compose_network_interfaces, + ) +from provisioningserver.udev import compose_network_interfaces_udev_rules +from provisioningserver.utils.curtin import ( + compose_recursive_copy, + compose_write_text_file, + ) + + +class UbuntuOS(OperatingSystem): + """Ubuntu operating system.""" + + name = "ubuntu" + title = "Ubuntu" + + def get_boot_image_purposes(self, arch, subarch, release, label): + """Gets the purpose of each boot image.""" + return [ + BOOT_IMAGE_PURPOSE.COMMISSIONING, + BOOT_IMAGE_PURPOSE.INSTALL, + BOOT_IMAGE_PURPOSE.XINSTALL, + BOOT_IMAGE_PURPOSE.DISKLESS, + ] + + def is_release_supported(self, release): + """Return True when the release is supported, False otherwise.""" + row = self.get_distro_series_info_row(release) + return row is not None + + def get_lts_release(self): + """Return the latest Ubuntu LTS release.""" + return UbuntuDistroInfo().lts() + + def get_default_release(self): + """Gets the default release to use when a release is not + explicit.""" + return self.get_lts_release() + + def get_supported_commissioning_releases(self): + """Gets the supported commissioning releases for Ubuntu. This + only exists on Ubuntu, because that is the only operating + system that supports commissioning. + """ + return [self.get_lts_release()] + + def get_default_commissioning_release(self): + """Gets the default commissioning release for Ubuntu. This only exists + on Ubuntu, because that is the only operating system that supports + commissioning. 
+ """ + return self.get_lts_release() + + def get_distro_series_info_row(self, release): + """Returns the distro series row information from python-distro-info. + """ + info = UbuntuDistroInfo() + for row in info._avail(info._date): + if row['series'] == release: + return row + return None + + def get_release_title(self, release): + """Return the title for the given release.""" + row = self.get_distro_series_info_row(release) + if row is None: + return None + return UbuntuDistroInfo()._format("fullname", row) + + def compose_curtin_network_preseed(self, interfaces, auto_interfaces, + ips_mapping, gateways_mapping, + disable_ipv4=False, nameservers=None, + netmasks=None): + """As defined in `OperatingSystem`: generate networking Curtin preseed. + + Supports: + * Static IPv6 address and gateway configuration. + * DHCP-based IPv4 configuration. + * Assigning network interface names through udev rules. + * Disabling IPv4. + """ + interfaces_file = compose_network_interfaces( + interfaces, auto_interfaces, ips_mapping=ips_mapping, + gateways_mapping=gateways_mapping, disable_ipv4=disable_ipv4, + nameservers=nameservers, netmasks=netmasks) + udev_rules = compose_network_interfaces_udev_rules(interfaces) + write_files = { + 'write_files': { + 'etc_network_interfaces': compose_write_text_file( + '/tmp/maas/etc/network/interfaces', interfaces_file, + permissions=0644), + 'udev_persistent_net': compose_write_text_file( + '/tmp/maas/etc/udev/rules.d/70-persistent-net.rules', + udev_rules, permissions=0644), + }, + } + late_commands = { + 'late_commands': { + 'copy_etc': compose_recursive_copy('/tmp/maas/etc', '/'), + }, + } + return [write_files, late_commands] diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/windows.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/windows.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/osystem/windows.py 1970-01-01 00:00:00.000000000 +0000 +++ 
maas-1.7.6+bzr3376/src/provisioningserver/drivers/osystem/windows.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,110 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Windows Operating System.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "WindowsOS", + ] + +import os +import re + +from provisioningserver.config import Config +from provisioningserver.drivers.osystem import ( + BOOT_IMAGE_PURPOSE, + OperatingSystem, + ) + + +WINDOWS_CHOICES = { + 'win2012': 'Windows "Server 2012"', + 'win2012r2': 'Windows "Server 2012 R2"', + 'win2012hv': 'Windows "Hyper-V Server 2012"', + 'win2012hvr2': 'Windows "Hyper-V Server 2012 R2"', +} + +WINDOWS_DEFAULT = 'win2012hvr2' + +REQUIRE_LICENSE_KEY = ['win2012', 'win2012r2'] + + +class WindowsOS(OperatingSystem): + """Windows operating system.""" + + name = "windows" + title = "Windows" + + def get_boot_image_purposes(self, arch, subarch, release, label): + """Gets the purpose of each boot image. Windows only allows install.""" + # Windows can support both xinstall and install, but the correct files + # need to be available before it is enabled. This way if only xinstall + # is available the node will boot correctly, even if fast-path + # installer is not selected. 
+        purposes = []
+        resources = Config.load_from_cache()['tftp']['resource_root']
+        path = os.path.join(
+            resources, 'windows', arch, subarch, release, label)
+        if os.path.exists(os.path.join(path, 'root-dd')):
+            purposes.append(BOOT_IMAGE_PURPOSE.XINSTALL)
+        if os.path.exists(os.path.join(path, 'pxeboot.0')):
+            purposes.append(BOOT_IMAGE_PURPOSE.INSTALL)
+        return purposes
+
+    def is_release_supported(self, release):
+        """Return True when the release is supported, False otherwise."""
+        return release in WINDOWS_CHOICES
+
+    def get_default_release(self):
+        """Gets the default release to use when a release is not
+        explicit."""
+        return WINDOWS_DEFAULT
+
+    def get_release_title(self, release):
+        """Return the title for the given release."""
+        return WINDOWS_CHOICES.get(release)
+
+    def requires_license_key(self, release):
+        return release in REQUIRE_LICENSE_KEY
+
+    def validate_license_key(self, release, key):
+        r = re.compile('^([A-Za-z0-9]{5}-){4}[A-Za-z0-9]{5}$')
+        return r.match(key) is not None
+
+    def compose_preseed(self, preseed_type, node, token, metadata_url):
+        """Since this method exists in the WindowsOS class, it will be called
+        to provide preseed to all booting Windows nodes.
+        """
+        # Don't override the curtin preseed.
+        if preseed_type == 'curtin':
+            raise NotImplementedError()
+
+        # Sets the hostname in the preseed. Using just the hostname
+        # not the FQDN.
+        hostname = node.hostname.split(".", 1)[0]
+        # Windows max hostname length is 15 characters. 
+ if len(hostname) > 15: + hostname = hostname[:15] + + credentials = { + 'maas_metadata_url': metadata_url, + 'maas_oauth_consumer_secret': '', + 'maas_oauth_consumer_key': token.consumer_key, + 'maas_oauth_token_key': token.token_key, + 'maas_oauth_token_secret': token.token_secret, + 'hostname': hostname, + } + return credentials + + def get_xinstall_parameters(self, arch, subarch, release, label): + """Returns the xinstall image name and type for Windows.""" + return "root-dd", "dd-tgz" diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/power/__init__.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/power/__init__.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/power/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/power/__init__.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,185 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Base power driver.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "get_power_address", + "get_mandatory_setting", + "PowerActionError", + "PowerAuthError", + "PowerConnError", + "PowerDriver", + "PowerDriverBase", + "PowerError", + "PowerFatalError", + "PowerSettingError", + "PowerToolError", + ] + +from abc import ( + ABCMeta, + abstractmethod, + abstractproperty, + ) + +from jsonschema import validate +from provisioningserver.drivers import ( + JSON_SETTING_SCHEMA, + validate_settings, + ) +from provisioningserver.utils.registry import Registry + + +JSON_POWER_DRIVERS_SCHEMA = { + 'title': "Power drivers parameters set", + 'type': 'array', + 'items': JSON_SETTING_SCHEMA, +} + + +class PowerError(Exception): + """Base error for all power driver failure commands.""" + + +class PowerFatalError(PowerError): + """Error that is raised when the power action should not continue to + retry at all. + + This exception will cause the power action to fail instantly, + without retrying. + """ + + +class PowerSettingError(PowerFatalError): + """Error that is raised when the power type is missing argument + that is required to control the BMC. + + This exception will cause the power action to fail instantly, + without retrying. + """ + + +class PowerToolError(PowerFatalError): + """Error that is raised when the power tool is missing completely + for use. + + This exception will cause the power action to fail instantly, + without retrying. 
+ """ + + +class PowerAuthError(PowerError): + """Error raised when power driver fails to authenticate to BMC.""" + + +class PowerConnError(PowerError): + """Error raised when power driver fails to communicate to BMC.""" + + +class PowerActionError(PowerError): + """Error when actually performing an action on the BMC, like `on` + or `off`.""" + + +class PowerDriverBase: + """Base driver for a power driver.""" + + __metaclass__ = ABCMeta + + def __init__(self): + super(PowerDriverBase, self).__init__() + validate_settings(self.get_schema()) + + @abstractproperty + def name(self): + """Name of the power driver.""" + + @abstractproperty + def description(self): + """Description of the power driver.""" + + @abstractproperty + def settings(self): + """List of settings for the driver. + + Each setting in this list will be different per user. They are passed + to the `on`, `off`, and `query` using the kwargs. It is up + to the driver to read these options before performing the operation. + """ + + @abstractmethod + def on(self, system_id, **kwargs): + """Perform the power on action for `system_id`. + + :param system_id: `Node.system_id` + :param kwargs: Power settings for the node. + """ + + @abstractmethod + def off(self, system_id, **kwargs): + """Perform the power off action for `system_id`. + + :param system_id: `Node.system_id` + :param kwargs: Power settings for the node. + """ + + @abstractmethod + def query(self, system_id, **kwargs): + """Perform the query action for `system_id`. + + :param system_id: `Node.system_id` + :param kwargs: Power settings for the node. + :return: status of power on BMC. `on` or `off`. + :raises PowerError: states unable to get status from BMC. It is + up to this method to report the actual issue to the Region. The + calling function should ignore this error, and continue on. 
+ """ + + def get_schema(self): + """Returns the JSON schema for the driver.""" + return dict( + name=self.name, description=self.description, + fields=self.settings) + + +def get_error_message(err): + """Returns the proper error message based on error.""" + if isinstance(err, PowerAuthError): + return "Could not authenticate to node's BMC: %s" % err + elif isinstance(err, PowerConnError): + return "Could not contact node's BMC: %s" % err + elif isinstance(err, PowerSettingError): + return "Missing or invalid power setting: %s" % err + elif isinstance(err, PowerToolError): + return "Missing power tool: %s" % err + elif isinstance(err, PowerActionError): + return "Failed to complete power action: %s" % err + else: + return "Failed talking to node's BMC for an unknown reason." + + +class PowerDriverRegistry(Registry): + """Registry for power drivers.""" + + @classmethod + def get_schema(cls): + """Returns the full schema for the registry.""" + schemas = [drivers.get_schema() for _, drivers in cls] + validate(schemas, JSON_POWER_DRIVERS_SCHEMA) + return schemas + + +builtin_power_drivers = [ + ] +for driver in builtin_power_drivers: + PowerDriverRegistry.register_item(driver.name, driver) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/power/tests/test_base.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/power/tests/test_base.py --- maas-1.5.4+bzr2294/src/provisioningserver/drivers/power/tests/test_base.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/power/tests/test_base.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,220 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for `provisioningserver.drivers.power`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from mock import sentinel +from provisioningserver.drivers import ( + make_setting_field, + validate_settings, + ) +from provisioningserver.drivers.power import ( + get_error_message, + PowerActionError, + PowerAuthError, + PowerConnError, + PowerDriverBase, + PowerDriverRegistry, + PowerError, + PowerSettingError, + PowerToolError, + ) +from provisioningserver.utils.testing import RegistryFixture + + +class FakePowerDriverBase(PowerDriverBase): + + name = "" + description = "" + settings = [] + + def __init__(self, name, description, settings): + self.name = name + self.description = description + self.settings = settings + super(FakePowerDriverBase, self).__init__() + + def on(self, system_id, **kwargs): + raise NotImplementedError + + def off(self, system_id, **kwargs): + raise NotImplementedError + + def query(self, system_id, **kwargs): + raise NotImplementedError + + +def make_power_driver_base(name=None, description=None, settings=None): + if name is None: + name = factory.make_name('diskless') + if description is None: + description = factory.make_name('description') + if settings is None: + settings = [] + return FakePowerDriverBase(name, description, settings) + + +class TestFakePowerDriverBase(MAASTestCase): + + def test_attributes(self): + fake_name = factory.make_name('name') + fake_description = factory.make_name('description') + fake_setting = factory.make_name('setting') + fake_settings = [ + make_setting_field( + fake_setting, fake_setting.title()), + ] + attributes = { + 'name': fake_name, + 'description': fake_description, + 'settings': fake_settings, + } + fake_driver = FakePowerDriverBase( + fake_name, fake_description, fake_settings) + 
self.assertAttributes(fake_driver, attributes)
+
+    def test_make_power_driver_base(self):
+        fake_name = factory.make_name('name')
+        fake_description = factory.make_name('description')
+        fake_setting = factory.make_name('setting')
+        fake_settings = [
+            make_setting_field(
+                fake_setting, fake_setting.title()),
+            ]
+        attributes = {
+            'name': fake_name,
+            'description': fake_description,
+            'settings': fake_settings,
+            }
+        fake_driver = make_power_driver_base(
+            name=fake_name, description=fake_description,
+            settings=fake_settings)
+        self.assertAttributes(fake_driver, attributes)
+
+    def test_make_power_driver_base_makes_name_and_description(self):
+        fake_driver = make_power_driver_base()
+        self.assertNotEqual("", fake_driver.name)
+        self.assertNotEqual("", fake_driver.description)
+
+    def test_on_raises_not_implemented(self):
+        fake_driver = make_power_driver_base()
+        self.assertRaises(
+            NotImplementedError,
+            fake_driver.on, sentinel.system_id)
+
+    def test_off_raises_not_implemented(self):
+        fake_driver = make_power_driver_base()
+        self.assertRaises(
+            NotImplementedError,
+            fake_driver.off, sentinel.system_id)
+
+    def test_query_raises_not_implemented(self):
+        fake_driver = make_power_driver_base()
+        self.assertRaises(
+            NotImplementedError,
+            fake_driver.query, sentinel.system_id)
+
+
+class TestPowerDriverBase(MAASTestCase):
+
+    def test_get_schema(self):
+        fake_name = factory.make_name('name')
+        fake_description = factory.make_name('description')
+        fake_setting = factory.make_name('setting')
+        fake_settings = [
+            make_setting_field(
+                fake_setting, fake_setting.title()),
+            ]
+        fake_driver = make_power_driver_base(
+            name=fake_name, description=fake_description,
+            settings=fake_settings)
+        self.assertItemsEqual({
+            'name': fake_name,
+            'description': fake_description,
+            'fields': fake_settings,
+            },
+            fake_driver.get_schema())
+
+    def test_get_schema_returns_valid_schema(self):
+        fake_driver = make_power_driver_base()
+        #: doesn't raise ValidationError
+        validate_settings(fake_driver.get_schema())
+
+
+class 
TestPowerDriverRegistry(MAASTestCase): + + def setUp(self): + super(TestPowerDriverRegistry, self).setUp() + # Ensure the global registry is empty for each test run. + self.useFixture(RegistryFixture()) + + def test_registry(self): + self.assertItemsEqual([], PowerDriverRegistry) + PowerDriverRegistry.register_item("driver", sentinel.driver) + self.assertIn( + sentinel.driver, + (item for name, item in PowerDriverRegistry)) + + def test_get_schema(self): + fake_driver_one = make_power_driver_base() + fake_driver_two = make_power_driver_base() + PowerDriverRegistry.register_item( + fake_driver_one.name, fake_driver_one) + PowerDriverRegistry.register_item( + fake_driver_two.name, fake_driver_two) + self.assertItemsEqual([ + { + 'name': fake_driver_one.name, + 'description': fake_driver_one.description, + 'fields': [], + }, + { + 'name': fake_driver_two.name, + 'description': fake_driver_two.description, + 'fields': [], + }], + PowerDriverRegistry.get_schema()) + + +class TestGetErrorMessage(MAASTestCase): + + scenarios = [ + ('auth', dict( + exception=PowerAuthError('auth'), + message="Could not authenticate to node's BMC: auth", + )), + ('conn', dict( + exception=PowerConnError('conn'), + message="Could not contact node's BMC: conn", + )), + ('setting', dict( + exception=PowerSettingError('setting'), + message="Missing or invalid power setting: setting", + )), + ('tool', dict( + exception=PowerToolError('tool'), + message="Missing power tool: tool", + )), + ('action', dict( + exception=PowerActionError('action'), + message="Failed to complete power action: action", + )), + ('unknown', dict( + exception=PowerError(), + message="Failed talking to node's BMC for an unknown reason.", + )), + ] + + def test_return_msg(self): + self.assertEqual(self.message, get_error_message(self.exception)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/drivers/tests/test_base.py maas-1.7.6+bzr3376/src/provisioningserver/drivers/tests/test_base.py --- 
maas-1.5.4+bzr2294/src/provisioningserver/drivers/tests/test_base.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/drivers/tests/test_base.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,161 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `provisioningserver.drivers`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import random + +from jsonschema import ( + validate, + ValidationError, + ) +from maastesting.factory import factory +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import MAASTestCase +from mock import sentinel +from provisioningserver import drivers +from provisioningserver.drivers import ( + Architecture, + ArchitectureRegistry, + BootResourceRegistry, + JSON_SETTING_SCHEMA, + make_setting_field, + PowerTypeRegistry, + SETTING_PARAMETER_FIELD_SCHEMA, + validate_settings, + ) +from provisioningserver.utils.testing import RegistryFixture +from testtools.matchers import ContainsAll + + +class TestMakeSettingField(MAASTestCase): + + def test_returns_valid_schema(self): + setting = make_setting_field( + factory.make_name('name'), factory.make_name('label')) + #: doesn't raise ValidationError + validate(setting, SETTING_PARAMETER_FIELD_SCHEMA) + + def test_returns_dict_with_required_fields(self): + setting = make_setting_field( + factory.make_name('name'), factory.make_name('label')) + self.assertThat( + setting, + ContainsAll([ + 'name', 'label', 'required', + 'field_type', 'choices', 'default'])) + + def test_defaults_field_type_to_string(self): + setting = make_setting_field( + factory.make_name('name'), factory.make_name('label')) + self.assertEqual('string', setting['field_type']) + + def test_defaults_choices_to_empty_list(self): + setting = make_setting_field( + 
factory.make_name('name'), factory.make_name('label')) + self.assertEqual([], setting['choices']) + + def test_defaults_default_to_empty_string(self): + setting = make_setting_field( + factory.make_name('name'), factory.make_name('label')) + self.assertEqual("", setting['default']) + + def test_validates_choices(self): + choices = [('invalid')] + self.assertRaises( + ValidationError, + make_setting_field, factory.make_name('name'), + factory.make_name('label'), field_type='choice', choices=choices) + + def test_returns_dict_with_correct_values(self): + name = factory.make_name('name') + label = factory.make_name('label') + field_type = random.choice(['string', 'mac_address', 'choice']) + choices = [ + [factory.make_name('key'), factory.make_name('value')] + for _ in range(3) + ] + default = factory.make_name('default') + setting = make_setting_field( + name, label, field_type=field_type, + choices=choices, default=default, required=True) + self.assertItemsEqual({ + 'name': name, + 'label': label, + 'field_type': field_type, + 'choices': choices, + 'default': default, + 'required': True + }, setting) + + +class TestValidateSettings(MAASTestCase): + + def test_calls_validate(self): + mock_validate = self.patch(drivers, 'validate') + validate_settings(sentinel.settings) + self.assertThat( + mock_validate, + MockCalledOnceWith(sentinel.settings, JSON_SETTING_SCHEMA)) + + +class TestRegistries(MAASTestCase): + + def setUp(self): + super(TestRegistries, self).setUp() + # Ensure the global registry is empty for each test run. 
+ self.useFixture(RegistryFixture()) + + def test_bootresource_registry(self): + self.assertItemsEqual([], BootResourceRegistry) + BootResourceRegistry.register_item("resource", sentinel.resource) + self.assertIn( + sentinel.resource, + (item for name, item in BootResourceRegistry)) + + def test_architecture_registry(self): + self.assertItemsEqual([], ArchitectureRegistry) + ArchitectureRegistry.register_item("resource", sentinel.resource) + self.assertIn( + sentinel.resource, + (item for name, item in ArchitectureRegistry)) + + def test_get_by_pxealias_returns_valid_arch(self): + arch1 = Architecture( + name="arch1", description="arch1", + pxealiases=["archibald", "reginald"]) + arch2 = Architecture( + name="arch2", description="arch2", + pxealiases=["fake", "foo"]) + ArchitectureRegistry.register_item("arch1", arch1) + ArchitectureRegistry.register_item("arch2", arch2) + self.assertEqual( + arch1, ArchitectureRegistry.get_by_pxealias("archibald")) + + def test_get_by_pxealias_returns_None_if_none_matching(self): + arch1 = Architecture( + name="arch1", description="arch1", + pxealiases=["archibald", "reginald"]) + arch2 = Architecture(name="arch2", description="arch2") + ArchitectureRegistry.register_item("arch1", arch1) + ArchitectureRegistry.register_item("arch2", arch2) + self.assertEqual( + None, ArchitectureRegistry.get_by_pxealias("stinkywinky")) + + def test_power_type_registry(self): + self.assertItemsEqual([], PowerTypeRegistry) + PowerTypeRegistry.register_item("resource", sentinel.resource) + self.assertIn( + sentinel.resource, + (item for name, item in PowerTypeRegistry)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/events.py maas-1.7.6+bzr3376/src/provisioningserver/events.py --- maas-1.5.4+bzr2294/src/provisioningserver/events.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/events.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,193 @@ +# Copyright 2014 Canonical Ltd. 
This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Event catalog.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'EVENT_DETAILS', + 'EVENT_TYPES', + ] + +from collections import namedtuple +from logging import ( + DEBUG, + ERROR, + INFO, + WARN, + ) + +from provisioningserver.logger.log import get_maas_logger +from provisioningserver.rpc import getRegionClient +from provisioningserver.rpc.exceptions import ( + NoSuchEventType, + NoSuchNode, + ) +from provisioningserver.rpc.region import ( + RegisterEventType, + SendEvent, + SendEventMACAddress, + ) +from provisioningserver.utils.twisted import asynchronous +from twisted.internet.defer import inlineCallbacks + + +maaslog = get_maas_logger("events") + + +class EVENT_TYPES: + # Power-related events. + NODE_POWER_ON_STARTING = 'NODE_POWER_ON_STARTING' + NODE_POWER_OFF_STARTING = 'NODE_POWER_OFF_STARTING' + NODE_POWERED_ON = 'NODE_POWERED_ON' + NODE_POWERED_OFF = 'NODE_POWERED_OFF' + NODE_POWER_ON_FAILED = 'NODE_POWER_ON_FAILED' + NODE_POWER_OFF_FAILED = 'NODE_POWER_OFF_FAILED' + NODE_POWER_QUERY_FAILED = 'NODE_POWER_QUERY_FAILED' + # PXE request event. + NODE_PXE_REQUEST = 'NODE_PXE_REQUEST' + # TFTP request event. + NODE_TFTP_REQUEST = 'NODE_TFTP_REQUEST' + # Other installation-related event types. + NODE_INSTALLATION_FINISHED = "NODE_INSTALLATION_FINISHED" + # Node status transition event. + NODE_CHANGED_STATUS = "NODE_CHANGED_STATUS" + + +EventDetail = namedtuple("EventDetail", ("description", "level")) + + +EVENT_DETAILS = { + # Event type -> EventDetail mapping. 
+ EVENT_TYPES.NODE_POWER_ON_STARTING: EventDetail( + description="Powering node on", + level=INFO, + ), + EVENT_TYPES.NODE_POWER_OFF_STARTING: EventDetail( + description="Powering node off", + level=INFO, + ), + EVENT_TYPES.NODE_POWERED_ON: EventDetail( + description="Node powered on", + level=INFO, + ), + EVENT_TYPES.NODE_POWERED_OFF: EventDetail( + description="Node powered off", + level=INFO, + ), + EVENT_TYPES.NODE_POWER_ON_FAILED: EventDetail( + description="Failed to power on node", + level=ERROR, + ), + EVENT_TYPES.NODE_POWER_OFF_FAILED: EventDetail( + description="Failed to power off node", + level=ERROR, + ), + EVENT_TYPES.NODE_POWER_QUERY_FAILED: EventDetail( + description="Failed to query node's BMC", + level=WARN, + ), + EVENT_TYPES.NODE_TFTP_REQUEST: EventDetail( + description="TFTP Request", + level=DEBUG, + ), + EVENT_TYPES.NODE_PXE_REQUEST: EventDetail( + description="PXE Request", + level=INFO, + ), + EVENT_TYPES.NODE_INSTALLATION_FINISHED: EventDetail( + description="Installation complete", + level=INFO, + ), + EVENT_TYPES.NODE_CHANGED_STATUS: EventDetail( + description="Node changed status", + level=INFO, + ), +} + + +@asynchronous +@inlineCallbacks +def send_event_node(event_type, system_id, hostname, description=''): + """Send the given node event to the region. + + Also register the event type if it's not registered yet. + + :param event_type: The type of the event. + :type event_type: unicode + :param system_id: The system ID of the node of the event. + :type system_id: unicode + :param hostname: The hostname of the node of the event. + :type hostname: unicode + :param description: An optional description of the event. + :type description: unicode + """ + client = getRegionClient() + try: + yield client( + SendEvent, system_id=system_id, type_name=event_type, + description=description) + except NoSuchEventType: + # The event type doesn't exist, register it and re-send the event. 
+ event_detail = EVENT_DETAILS[event_type] + yield client( + RegisterEventType, name=event_type, + description=event_detail.description, level=event_detail.level + ) + yield client( + SendEvent, system_id=system_id, type_name=event_type, + description=description) + maaslog.debug( + "Node event %s sent for node: %s (%s)", + event_type, hostname, system_id) + + +@asynchronous +@inlineCallbacks +def send_event_node_mac_address(event_type, mac_address, description=''): + """Send the given node event to the region for the given mac address. + + Also register the event type if it's not registered yet. + + :param event_type: The type of the event. + :type event_type: unicode + :param mac_address: The MAC Address of the node of the event. + :type mac_address: unicode + :param description: An optional description of the event. + :type description: unicode + """ + client = getRegionClient() + try: + yield client( + SendEventMACAddress, mac_address=mac_address, type_name=event_type, + description=description) + except NoSuchEventType: + # The event type doesn't exist, register it and re-send the event. 
+ event_detail = EVENT_DETAILS[event_type] + yield client( + RegisterEventType, name=event_type, + description=event_detail.description, level=event_detail.level + ) + try: + yield client( + SendEventMACAddress, mac_address=mac_address, + type_name=event_type, description=description) + except NoSuchNode: + # Enlistment will raise NoSuchNode, + # potentially too much noise for maaslog + pass + except NoSuchNode: + # Enlistment will raise NoSuchNode, + # potentially too much noise for maaslog + pass + maaslog.debug( + "Node event %s sent for MAC address: %s", + event_type, mac_address) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/boot_image_mapping.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/boot_image_mapping.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/boot_image_mapping.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/boot_image_mapping.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,118 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""The `BootImageMapping` class.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'BootImageMapping', + ] + +import json + +from provisioningserver.import_images.helpers import ImageSpec +from provisioningserver.utils import dict_depth + + +def gen_image_spec_with_resource(os, data): + """Generate image and resource for given operating system and data.""" + for arch in data: + for subarch in data[arch]: + for release in data[arch][subarch]: + for label in data[arch][subarch][release]: + image = ImageSpec( + os=os, arch=arch, subarch=subarch, + release=release, label=label) + resource = data[arch][subarch][release][label] + yield image, resource + + +class BootImageMapping: + """Mapping of boot-image data. 
+ + Maps `ImageSpec` tuples to metadata for Simplestreams products. + + This class is deliberately a bit more restrictive and less ad-hoc than a + dict. It helps keep a clear view of the data structures in this module. + """ + + def __init__(self): + self.mapping = {} + + def items(self): + """Iterate over `ImageSpec` keys, and their stored values.""" + for image_spec, item in sorted(self.mapping.items()): + yield image_spec, item + + def is_empty(self): + """Is this mapping empty?""" + return len(self.mapping) == 0 + + def setdefault(self, image_spec, item): + """Set metadata for `image_spec` to item, if not already set.""" + assert isinstance(image_spec, ImageSpec) + self.mapping.setdefault(image_spec, item) + + def set(self, image_spec, item): + """"Set metadata for `image_spec` to item, even if already set.""" + assert isinstance(image_spec, ImageSpec) + self.mapping[image_spec] = item + + def dump_json(self): + """Produce JSON representing the mapped boot images. + + Tries to keep the output deterministic, so that identical data is + likely to produce identical JSON. + """ + # The meta files represent the mapping as a nested hierarchy of dicts. + # Keep that format. + data = {} + for image, resource in self.items(): + os, arch, subarch, release, label = image + data.setdefault(os, {}) + data[os].setdefault(arch, {}) + data[os][arch].setdefault(subarch, {}) + data[os][arch][subarch].setdefault(release, {}) + data[os][arch][subarch][release][label] = resource + return json.dumps(data, sort_keys=True) + + @staticmethod + def load_json(json_data): + """Take a JSON representation and deserialize into an object. + + :param json_data: string produced by dump_json(), above. + :return: A BootImageMapping + + If the json data is invalid, an empty BootImageMapping is returned. + """ + mapping = BootImageMapping() + try: + data = json.loads(json_data) + except ValueError: + return mapping + + depth = dict_depth(data) + if depth == 5: + # Support for older data. 
This has no operating system, then + # it is ubuntu. + for image, resource in gen_image_spec_with_resource( + "ubuntu", data): + mapping.setdefault(image, resource) + elif depth == 6: + for os in data: + for image, resource in gen_image_spec_with_resource( + os, data[os]): + mapping.setdefault(image, resource) + return mapping + + def get_image_arches(self): + """Set of arches this BootImageMapping has an ImageSpec for.""" + return {item[0].arch for item in self.items()} diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/boot_resources.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/boot_resources.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/boot_resources.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/boot_resources.py 2015-07-10 01:27:14.000000000 +0000 @@ -11,217 +11,47 @@ __metaclass__ = type __all__ = [ + 'import_images', 'main', - 'available_boot_resources', + 'main_with_services', 'make_arg_parser', ] from argparse import ArgumentParser -from collections import ( - defaultdict, - namedtuple, - ) -from datetime import datetime import errno -import functools -import glob -from gzip import GzipFile -import json -import logging -from logging import getLogger import os from textwrap import dedent +import provisioningserver from provisioningserver.boot import BootMethodRegistry from provisioningserver.boot.tftppath import list_boot_images -from provisioningserver.config import BootConfig -from provisioningserver.utils import ( - atomic_write, - call_and_check, - locate_config, - read_text_file, +from provisioningserver.config import BootSources +from provisioningserver.import_images.cleanup import ( + cleanup_snapshots_and_cache, ) -from simplestreams.contentsource import FdContentSource -from simplestreams.mirrors import ( - BasicMirrorWriter, - UrlMirrorReader, +from provisioningserver.import_images.download_descriptions import ( + download_all_image_descriptions, ) 
-from simplestreams.objectstores import FileStore -from simplestreams.util import ( - item_checksums, - path_from_mirror_url, - policy_read_signed, - products_exdata, +from provisioningserver.import_images.download_resources import ( + download_all_boot_resources, ) - - -def init_logger(log_level=logging.INFO): - logger = getLogger(__name__) - formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') - handler = logging.StreamHandler() - handler.setFormatter(formatter) - logger.addHandler(handler) - logger.setLevel(log_level) - return logger - - -logger = init_logger() +from provisioningserver.import_images.helpers import maaslog +from provisioningserver.import_images.keyrings import write_all_keyrings +from provisioningserver.import_images.product_mapping import map_products +from provisioningserver.utils import get_cluster_config +from provisioningserver.utils.fs import ( + atomic_write, + read_text_file, + tempdir, + ) +from provisioningserver.utils.shell import call_and_check class NoConfigFile(Exception): """Raised when the config file for the script doesn't exist.""" -def create_empty_hierarchy(): - """Create hierarchy of dicts which supports h[key1]...[keyN] accesses. - - Generated object automatically creates nonexistent levels of hierarchy - when accessed the following way: h[arch][subarch][release]=something. - - :return Generated hierarchy of dicts. - """ - return defaultdict(create_empty_hierarchy) - - -# A tuple of the items that together select a boot image. -ImageSpec = namedtuple(b'ImageSpec', [ - 'arch', - 'subarch', - 'release', - 'label', - ]) - - -def iterate_boot_resources(boot_dict): - """Iterate a multi-level dict of boot images. - - Yields each combination of architecture, subarchitecture, release, and - label for which `boot` has an entry, as an `ImageSpec`. 
- - :param boot: Four-level dict of dicts representing boot images: the top - level maps the architectures to sub-dicts, each of which maps - subarchitectures to further dicts, each of which in turn maps - releases to yet more dicts, each of which maps release labels to any - kind of item it likes. - """ - for arch, subarches in sorted(boot_dict.items()): - for subarch, releases in sorted(subarches.items()): - for release, labels in sorted(releases.items()): - for label in sorted(labels.keys()): - yield ImageSpec(arch, subarch, release, label) - - -def value_passes_filter_list(filter_list, property_value): - """Does the given property of a boot image pass the given filter list? - - The value passes if either it matches one of the entries in the list of - filter values, or one of the filter values is an asterisk (`*`). - """ - return '*' in filter_list or property_value in filter_list - - -def value_passes_filter(filter_value, property_value): - """Does the given property of a boot image pass the given filter? - - The value passes the filter if either the filter value is an asterisk - (`*`) or the value is equal to the filter value. - """ - return filter_value in ('*', property_value) - - -def image_passes_filter(filters, arch, subarch, release, label): - """Filter a boot image against configured import filters. - - :param filters: A list of dicts describing the filters, as in `boot_merge`. - If the list is empty, or `None`, any image matches. Any entry in a - filter may be a string containing just an asterisk (`*`) to denote that - the entry will match any value. - :param arch: The given boot image's architecture. - :param subarch: The given boot image's subarchitecture. - :param release: The given boot image's OS release. - :param label: The given boot image's label. - :return: Whether the image matches any of the dicts in `filters`. 
- """ - if filters is None or len(filters) == 0: - return True - for filter_dict in filters: - item_matches = ( - value_passes_filter(filter_dict['release'], release) and - value_passes_filter_list(filter_dict['arches'], arch) and - value_passes_filter_list(filter_dict['subarches'], subarch) and - value_passes_filter_list(filter_dict['labels'], label) - ) - if item_matches: - return True - return False - - -def boot_merge(boot1, boot2, filters=None): - """Add entries from the second multi-level dict to the first one. - - Function copies d[arch][subarch][release]=value chains from the second - dictionary to the first one if they don't exist there and pass optional - check done by filters. - - :param boot1: first dict which will be extended in-place. - :param boot2: second dict which will be used as a source of new entries. - :param filters: list of dicts each of which contains 'arch', 'subarch', - 'release' keys; function takes d[arch][subarch][release] chain to the - first dict only if filters contain at least one dict with - arch in d['arches'], subarch in d['subarch'], d['release'] == release; - dict may have '*' as a value for 'arch' and 'release' keys and as a - member of 'subarch' list -- in that case key-specific check always - passes. - """ - for arch, subarch, release, label in iterate_boot_resources(boot2): - if image_passes_filter(filters, arch, subarch, release, label): - logger.debug( - "Merging boot resource for %s/%s/%s/%s.", - arch, subarch, release, label) - boot_resource = boot2[arch][subarch][release][label] - # Do not override an existing entry with the same - # arch/subarch/release/label: the first entry found takes - # precedence. - if not boot1[arch][subarch][release][label]: - boot1[arch][subarch][release][label] = boot_resource - - -def boot_reverse(boot): - """Determine a set of subarches which should be deployed by boot resource. 
- - Function reverses h[arch][subarch][release]=boot_resource hierarchy to form - boot resource to subarch relation. Many subarches may be deployed by a - single boot resource (in which case boot_resource=[subarch1, subarch2] - relation will be created). We note only subarchitectures and ignore - architectures because boot resource is tightly coupled with architecture - it can deploy according to metadata format. We can figure out for which - architecture we need to use a specific boot resource by looking at its - description in metadata. We can't do the same with subarch because we may - want to use boot resource only for a specific subset of subarches it can be - used for. To represent boot resource to subarch relation we generate the - following multi-level dictionary: d[content_id][product_name]=[subarches] - where 'content_id' and 'product_name' values come from metadata information - and allow us to uniquely identify a specific boot resource. - - :param boot: Hierarchy of dicts d[arch][subarch][release]=boot_resource - :return Hierarchy of dictionaries d[content_id][product_name]=[subarches] - which describes boot resource to subarches relation for all available - boot resources (products). 
- """ - reverse = create_empty_hierarchy() - - for arch, subarch, release, label in iterate_boot_resources(boot): - boot_resource = boot[arch][subarch][release][label] - content_id = boot_resource['content_id'] - product_name = boot_resource['product_name'] - version_name = boot_resource['version_name'] - existent = list(reverse[content_id][product_name][version_name]) - reverse[content_id][product_name][version_name] = [subarch] + existent - - return reverse - - -def tgt_entry(arch, subarch, release, label, image): +def tgt_entry(osystem, arch, subarch, release, label, image): """Generate tgt target used to commission arch/subarch with release Tgt target used to commission arch/subarch machine with a specific Ubuntu @@ -235,6 +65,7 @@ use the same inode for different tgt targets (even read-only targets which looks like a bug to me) without this option enabled. + :param osystem: Operating System name we generate tgt target for :param arch: Architecture name we generate tgt target for :param subarch: Subarchitecture name we generate tgt target for :param release: Ubuntu release we generate tgt target for @@ -243,190 +74,74 @@ :return Tgt entry which can be written to tgt-admin configuration file """ prefix = 'iqn.2004-05.com.ubuntu:maas' - target_name = 'ephemeral-%s-%s-%s-%s' % (arch, subarch, release, label) + target_name = 'ephemeral-%s-%s-%s-%s-%s' % ( + osystem, + arch, + subarch, + release, + label + ) entry = dedent("""\ - - readonly 1 - allow-in-use yes - backing-store "{image}" - driver iscsi - - """).format(prefix=prefix, target_name=target_name, image=image) + + readonly 1 + allow-in-use yes + backing-store "{image}" + driver iscsi + + """).format(prefix=prefix, target_name=target_name, image=image) return entry -def mirror_info_for_path(path, unsigned_policy=None, keyring=None): - if unsigned_policy is None: - unsigned_policy = lambda content, path, keyring: content - (mirror, rpath) = path_from_mirror_url(path, None) - policy = policy_read_signed - if 
rpath.endswith(".json"): - policy = unsigned_policy - if keyring: - policy = functools.partial(policy, keyring=keyring) - return(mirror, rpath, policy) - - -class RepoDumper(BasicMirrorWriter): - - def dump(self, path, keyring=None): - self._boot = create_empty_hierarchy() - (mirror, rpath, policy) = mirror_info_for_path(path, keyring=keyring) - reader = UrlMirrorReader(mirror, policy=policy) - super(RepoDumper, self).sync(reader, rpath) - return self._boot - - def load_products(self, path=None, content_id=None): - return - - def item_cleanup(self, item): - keys_to_keep = ['content_id', 'product_name', 'version_name', 'path'] - compact_item = {key: item[key] for key in keys_to_keep} - return compact_item - - def insert_item(self, data, src, target, pedigree, contentsource): - item = products_exdata(src, pedigree) - arch, subarches = item['arch'], item['subarches'] - release = item['release'] - label = item['label'] - compact_item = self.item_cleanup(item) - for subarch in subarches.split(','): - if not self._boot[arch][subarch][release][label]: - self._boot[arch][subarch][release][label] = compact_item - - -class RepoWriter(BasicMirrorWriter): - - def __init__(self, root_path, cache_path, info): - self._root_path = os.path.abspath(root_path) - self._info = info - self._cache = FileStore(os.path.abspath(cache_path)) - super(RepoWriter, self).__init__() - - def write(self, path, keyring=None): - (mirror, rpath, policy) = mirror_info_for_path(path, keyring=keyring) - reader = UrlMirrorReader(mirror, policy=policy) - super(RepoWriter, self).sync(reader, rpath) - - def load_products(self, path=None, content_id=None): - return - - def filter_version(self, data, src, target, pedigree): - item = products_exdata(src, pedigree) - content_id, product_name = item['content_id'], item['product_name'] - version_name = item['version_name'] - return ( - content_id in self._info and - product_name in self._info[content_id] and - version_name in self._info[content_id][product_name] - 
) - - def insert_file(self, name, tag, checksums, size, contentsource): - logger.info("Inserting file %s (tag=%s, size=%s).", name, tag, size) - self._cache.insert( - tag, contentsource, checksums, mutable=False, size=size) - return [(self._cache._fullpath(tag), name)] - - def insert_root_image(self, tag, checksums, size, contentsource): - root_image_tag = 'root-image-%s' % tag - root_image_path = self._cache._fullpath(root_image_tag) - root_tgz_tag = 'root-tgz-%s' % tag - root_tgz_path = self._cache._fullpath(root_tgz_tag) - if not os.path.isfile(root_image_path): - logger.info("New root image: %s.", root_image_path) - self._cache.insert( - tag, contentsource, checksums, mutable=False, size=size) - uncompressed = FdContentSource( - GzipFile(self._cache._fullpath(tag))) - self._cache.insert(root_image_tag, uncompressed, mutable=False) - self._cache.remove(tag) - if not os.path.isfile(root_tgz_path): - logger.info("Converting root tarball: %s.", root_tgz_path) - call_uec2roottar(root_image_path, root_tgz_path) - return [(root_image_path, 'root-image'), (root_tgz_path, 'root-tgz')] - - def insert_item(self, data, src, target, pedigree, contentsource): - item = products_exdata(src, pedigree) - checksums = item_checksums(data) - tag = checksums['sha256'] - size = data['size'] - ftype = item['ftype'] - if ftype == 'root-image.gz': - links = self.insert_root_image(tag, checksums, size, contentsource) - else: - links = self.insert_file( - ftype, tag, checksums, size, contentsource) - content_id = item['content_id'] - prod_name = item['product_name'] - version_name = item['version_name'] - for subarch in self._info[content_id][prod_name][version_name]: - dst_folder = os.path.join( - self._root_path, item['arch'], subarch, item['release'], - item['label']) - if not os.path.exists(dst_folder): - os.makedirs(dst_folder) - for src, link_name in links: - link_path = os.path.join(dst_folder, link_name) - if os.path.isfile(link_path): - os.remove(link_path) - os.link(src, 
link_path) - - -def available_boot_resources(root): - for resource_path in glob.glob(os.path.join(root, '*/*/*/*')): - arch, subarch, release, label = resource_path.split('/')[-4:] - yield (arch, subarch, release, label) - - -def install_boot_loaders(destination): +def install_boot_loaders(destination, arches): """Install the all the required file from each bootloader method. :param destination: Directory where the loaders should be stored. + :param arches: Arches we want to install boot loaders for. """ - for _, method in BootMethodRegistry: - method.install_bootloader(destination) - - -def call_uec2roottar(*args): - """Invoke `uec2roottar` with the given arguments. - - Here only so tests can stub it out. - """ - call_and_check(["uec2roottar"] + list(args)) + for _, boot_method in BootMethodRegistry: + if arches.intersection(boot_method.bootloader_arches) != set(): + boot_method.install_bootloader(destination) def make_arg_parser(doc): """Create an `argparse.ArgumentParser` for this script.""" parser = ArgumentParser(description=doc) - default_config = locate_config("bootresources.yaml") parser.add_argument( - '--config-file', action="store", default=default_config, - help="Path to config file " - "(defaults to %s)" % default_config) + '--sources-file', action="store", required=True, + help=( + "Path to YAML file defining import sources. " + "See this script's man page for a description of " + "that YAML file's format." + ) + ) return parser def compose_targets_conf(snapshot_path): """Produce the contents of a snapshot's tgt conf file. - :param snasphot_path: Filesystem path to a snapshot of boot images. + :param snapshot_path: Filesystem path to a snapshot of current upstream + boot resources. :return: Contents for a `targets.conf` file. :rtype: bytes """ # Use a set to make sure we don't register duplicate entries in tgt. 
entries = set() for item in list_boot_images(snapshot_path): + osystem = item['osystem'] arch = item['architecture'] subarch = item['subarchitecture'] release = item['release'] label = item['label'] - entries.add((arch, subarch, release, label)) + entries.add((osystem, arch, subarch, release, label)) tgt_entries = [] - for arch, subarch, release, label in sorted(entries): + for osystem, arch, subarch, release, label in sorted(entries): root_image = os.path.join( - snapshot_path, arch, subarch, release, label, 'root-image') + snapshot_path, osystem, arch, subarch, + release, label, 'root-image') if os.path.isfile(root_image): - entry = tgt_entry(arch, subarch, release, label, root_image) + entry = tgt_entry( + osystem, arch, subarch, release, label, root_image) tgt_entries.append(entry) text = ''.join(tgt_entries) return text.encode('utf-8') @@ -436,22 +151,17 @@ """Does the `maas.meta` file match `content`? If the file's contents match the latest data, there is no need to update. - """ - current_meta = os.path.join(storage, 'current', 'maas.meta') - return ( - os.path.isfile(current_meta) and - content == read_text_file(current_meta) - ) - -def compose_snapshot_path(storage): - """Put together a path for a new snapshot. - - A snapshot is a directory in `storage` containing images. The name - contains the date in a sortable format. + The file's timestamp is also updated to now to reflect the last time + that this import was run. """ - snapshot_name = 'snapshot-%s' % datetime.now().strftime('%Y%m%d-%H%M%S') - return os.path.join(storage, snapshot_name) + current_meta = os.path.join(storage, 'current', 'maas.meta') + exists = os.path.isfile(current_meta) + if exists: + # Touch file to the current timestamp so that the last time this + # import ran can be determined. 
+ os.utime(current_meta, None) + return exists and content == read_text_file(current_meta) def update_current_symlink(storage, latest_snapshot): @@ -462,20 +172,42 @@ os.symlink(latest_snapshot, symlink_path) -def write_snapshot_metadata(snapshot, meta_file_content, targets_conf, - targets_conf_content): - """Write "meta" file and tgt config for `snapshot`.""" +def write_snapshot_metadata(snapshot, meta_file_content): + """Write "maas.meta" file.""" meta_file = os.path.join(snapshot, 'maas.meta') atomic_write(meta_file_content, meta_file, mode=0644) + + +def write_targets_conf(snapshot): + """Write "maas.tgt" file.""" + targets_conf = os.path.join(snapshot, 'maas.tgt') + targets_conf_content = compose_targets_conf(snapshot) atomic_write(targets_conf_content, targets_conf, mode=0644) -def main(args): - logger.info("Importing boot resources.") +def update_targets_conf(snapshot): + """Runs tgt-admin to update the new targets from "maas.tgt".""" + targets_conf = os.path.join(snapshot, 'maas.tgt') + call_and_check([ + 'sudo', + '/usr/sbin/tgt-admin', + '--conf', targets_conf, + '--update', 'ALL', + ]) + + +def read_sources(sources_yaml): + """Read boot resources config file. + + :param sources_yaml: Path to a YAML file containing a list of boot + resource definitions. + :return: A dict representing the boot-resources configuration. + :raise NoConfigFile: If the configuration file was not present. + """ # The config file is required. We do not fall back to defaults if it's # not there. try: - config = BootConfig.load_from_cache(filename=args.config_file) + return BootSources.load(filename=sources_yaml) except IOError as ex: if ex.errno == errno.ENOENT: # No config file. We have helpful error output for this. @@ -484,40 +216,152 @@ # Unexpected error. 
raise - storage = config['boot']['storage'] - boot = create_empty_hierarchy() - dumper = RepoDumper() +def parse_sources(sources_yaml): + """Given a YAML `config` string, return a `BootSources` for it.""" + from StringIO import StringIO + return BootSources.parse(StringIO(sources_yaml)) + - for source in config['boot']['sources']: - repo_boot = dumper.dump(source['path'], keyring=source['keyring']) - boot_merge(boot, repo_boot, source['selections']) - - meta_file_content = json.dumps(boot, sort_keys=True) - if meta_contains(storage, meta_file_content): - # The current maas.meta already contains the new config. No need to - # rewrite anything. +def import_images(sources): + """Import images. Callable from the command line. + + :param config: An iterable of dicts representing the sources from + which boot images will be downloaded. + """ + maaslog.info("Started importing boot images.") + if len(sources) == 0: + maaslog.warn("Can't import: region did not provide a source.") return - reverse_boot = boot_reverse(boot) - snapshot_path = compose_snapshot_path(storage) - cache_path = os.path.join(storage, 'cache') - targets_conf = os.path.join(snapshot_path, 'maas.tgt') - writer = RepoWriter(snapshot_path, cache_path, reverse_boot) - - for source in config['boot']['sources']: - writer.write(source['path'], source['keyring']) - - targets_conf_content = compose_targets_conf(snapshot_path) - - logger.info("Writing metadata and updating iSCSI targets.") - write_snapshot_metadata( - snapshot_path, meta_file_content, targets_conf, targets_conf_content) - call_and_check(['tgt-admin', '--conf', targets_conf, '--update', 'ALL']) + with tempdir('keyrings') as keyrings_path: + # We download the keyrings now because we need them for both + # download_all_image_descriptions() and + # download_all_boot_resources() later. 
+ sources = write_all_keyrings(keyrings_path, sources) + + image_descriptions = download_all_image_descriptions(sources) + if image_descriptions.is_empty(): + maaslog.warn( + "Finished importing boot images, the region does not have " + "any boot images available.") + return + + storage = provisioningserver.config.BOOT_RESOURCES_STORAGE + meta_file_content = image_descriptions.dump_json() + if meta_contains(storage, meta_file_content): + maaslog.info( + "Finished importing boot images, the region does not " + "have any new images.") + return + + product_mapping = map_products(image_descriptions) + + snapshot_path = download_all_boot_resources( + sources, storage, product_mapping) + + maaslog.info("Writing boot image metadata and iSCSI targets.") + write_snapshot_metadata(snapshot_path, meta_file_content) + write_targets_conf(snapshot_path) - logger.info("Installing boot images snapshot %s.", snapshot_path) - install_boot_loaders(snapshot_path) + maaslog.info("Installing boot images snapshot %s" % snapshot_path) + install_boot_loaders(snapshot_path, image_descriptions.get_image_arches()) # If we got here, all went well. This is now truly the "current" snapshot. update_current_symlink(storage, snapshot_path) - logger.info("Import done.") + maaslog.info("Updating boot image iSCSI targets.") + update_targets_conf(snapshot_path) + + # Now cleanup the old snapshots and cache. + maaslog.info('Cleaning up old snapshots and cache.') + cleanup_snapshots_and_cache(storage) + + # Import is now finished. + maaslog.info("Finished importing boot images.") + + +def main(args): + """Entry point for the command-line import script. + + :param args: Command-line arguments as parsed by the `ArgumentParser` + returned by `make_arg_parser`. + :raise NoConfigFile: If a config file is specified but doesn't exist. 
+ """ + sources = read_sources(args.sources_file) + import_images(sources=sources) + + +def main_with_services(args): + """The *real* entry point for the command-line import script. + + This sets up the necessary RPC services before calling `main`, then clears + up behind itself. + + :param args: Command-line arguments as parsed by the `ArgumentParser` + returned by `make_arg_parser`. + :raise NoConfigFile: If a config file is specified but doesn't exist. + + """ + from sys import stderr + import traceback + + from provisioningserver import services + from provisioningserver.rpc import getRegionClient + from provisioningserver.rpc.clusterservice import ClusterClientService + from provisioningserver.rpc.exceptions import NoConnectionsAvailable + from provisioningserver.utils.twisted import retries, pause + from twisted.internet import reactor + from twisted.internet.defer import inlineCallbacks + from twisted.internet.threads import deferToThread + + @inlineCallbacks + def start_services(): + rpc_service = ClusterClientService(reactor) + rpc_service.setName("rpc") + rpc_service.setServiceParent(services) + + yield services.startService() + + for elapsed, remaining, wait in retries(15, 1, reactor): + try: + yield getRegionClient() + except NoConnectionsAvailable: + yield pause(wait, reactor) + else: + break + else: + print("Can't connect to the region.", file=stderr) + raise SystemExit(1) + + @inlineCallbacks + def stop_services(): + yield services.stopService() + + exit_codes = {0} + + @inlineCallbacks + def run_main(): + try: + yield start_services() + try: + yield deferToThread(main, args) + finally: + yield stop_services() + except SystemExit as se: + exit_codes.add(se.code) + except: + exit_codes.add(2) + print("Failed to import boot resources", file=stderr) + traceback.print_exc() + finally: + reactor.callLater(0, reactor.stop) + + cluster_config = get_cluster_config('/etc/maas/maas_cluster.conf') + os.environ['MAAS_URL'] = cluster_config['MAAS_URL'] + 
os.environ['CLUSTER_UUID'] = cluster_config['CLUSTER_UUID'] + + reactor.callWhenRunning(run_main) + reactor.run() + + exit_code = max(exit_codes) + raise SystemExit(exit_code) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/cleanup.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/cleanup.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/cleanup.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/cleanup.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,66 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Clean up old snapshot directories.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'cleanup_snapshots_and_cache', + ] + +import os +import shutil + + +def list_old_snapshots(storage): + """List of snapshot directories that are no longer in use.""" + current_dir = os.path.join(storage, 'current') + current_snapshot = os.path.basename(os.readlink(current_dir)) + return [ + os.path.join(storage, directory) + for directory in os.listdir(storage) + if directory.startswith('snapshot-') and directory != current_snapshot + ] + + +def cleanup_snapshots(storage): + """Remove old snapshot directories.""" + old_snapshots = list_old_snapshots(storage) + for snapshot in old_snapshots: + shutil.rmtree(snapshot) + + +def list_unused_cache_files(storage): + """List of cache files that are no longer being referenced by snapshots.""" + cache_dir = os.path.join(storage, 'cache') + cache_files = [ + os.path.join(cache_dir, filename) + for filename in os.listdir(cache_dir) + if os.path.isfile(os.path.join(cache_dir, filename)) + ] + return [ + cache_file + for cache_file in cache_files + if os.stat(cache_file).st_nlink == 1 + ] + + +def cleanup_cache(storage): + """Remove files that are no longer being 
referenced by snapshots.""" + cache_files = list_unused_cache_files(storage) + for cache_file in cache_files: + os.remove(cache_file) + + +def cleanup_snapshots_and_cache(storage): + """Remove old snapshot directories and old cache files.""" + cleanup_snapshots(storage) + cleanup_cache(storage) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/download_descriptions.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/download_descriptions.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/download_descriptions.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/download_descriptions.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,206 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Download boot resource descriptions from Simplestreams repo. + +This module is responsible only for syncing the repo's metadata, not the boot +resources themselves. The two are handled in separate Simplestreams +synchronisation stages. 
+""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'download_all_image_descriptions', + ] + + +from provisioningserver.import_images.boot_image_mapping import ( + BootImageMapping, + ) +from provisioningserver.import_images.helpers import ( + get_os_from_product, + get_signing_policy, + ImageSpec, + ) +from simplestreams.mirrors import ( + BasicMirrorWriter, + UrlMirrorReader, + ) +from simplestreams.util import ( + path_from_mirror_url, + products_exdata, + ) + + +def clean_up_repo_item(item): + """Return a subset of dict `item` for storing in a boot images dict.""" + keys_to_keep = [ + 'content_id', 'product_name', 'version_name', 'path', 'subarches'] + compact_item = {key: item[key] for key in keys_to_keep} + return compact_item + + +class RepoDumper(BasicMirrorWriter): + """Gather metadata about boot images available in a Simplestreams repo. + + Used inside `download_image_descriptions`. Stores basic metadata about + each image it finds upstream in a given `BootImageMapping`. Each stored + item is a dict containing the basic metadata for retrieving a boot image. + + Simplestreams' `BasicMirrorWriter` in itself is stateless. It relies on + a subclass (such as this one) to store data. + + :ivar boot_images_dict: A `BootImageMapping`. Image metadata will be + stored here as it is discovered. Simplestreams does not interact with + this variable. + """ + + def __init__(self, boot_images_dict): + super(RepoDumper, self).__init__(config={ + # Only download the latest version. Without this all versions + # will be read, causing miss matches in versions. + 'max_items': 1, + }) + self.boot_images_dict = boot_images_dict + + def load_products(self, path=None, content_id=None): + """Overridable from `BasicMirrorWriter`.""" + # It looks as if this method only makes sense for MirrorReaders, not + # for MirrorWriters. 
The default MirrorWriter implementation just + # raises NotImplementedError. Stop it from doing that. + return + + def insert_item(self, data, src, target, pedigree, contentsource): + """Overridable from `BasicMirrorWriter`.""" + item = products_exdata(src, pedigree) + os = get_os_from_product(item) + arch, subarches = item['arch'], item['subarches'] + release = item['release'] + label = item['label'] + base_image = ImageSpec(os, arch, None, release, label) + compact_item = clean_up_repo_item(item) + for subarch in subarches.split(','): + self.boot_images_dict.setdefault( + base_image._replace(subarch=subarch), compact_item) + + # HWE resources need to map to a specfic resource, and not just to + # any of the supported subarchitectures for that resource. + subarch = item['subarch'] + self.boot_images_dict.set( + base_image._replace(subarch=subarch), compact_item) + + # HWE resources with generic, should map to the HWE that ships with + # that release. + hwe_arch = 'hwe-%s' % release[0] + if subarch == hwe_arch and 'generic' in subarches: + self.boot_images_dict.set( + base_image._replace(subarch='generic'), compact_item) + + +def value_passes_filter_list(filter_list, property_value): + """Does the given property of a boot image pass the given filter list? + + The value passes if either it matches one of the entries in the list of + filter values, or one of the filter values is an asterisk (`*`). + """ + return '*' in filter_list or property_value in filter_list + + +def value_passes_filter(filter_value, property_value): + """Does the given property of a boot image pass the given filter? + + The value passes the filter if either the filter value is an asterisk + (`*`) or the value is equal to the filter value. + """ + return filter_value in ('*', property_value) + + +def image_passes_filter(filters, os, arch, subarch, release, label): + """Filter a boot image against configured import filters. 
+ + :param filters: A list of dicts describing the filters, as in `boot_merge`. + If the list is empty, or `None`, any image matches. Any entry in a + filter may be a string containing just an asterisk (`*`) to denote that + the entry will match any value. + :param os: The given boot image's operating system. + :param arch: The given boot image's architecture. + :param subarch: The given boot image's subarchitecture. + :param release: The given boot image's OS release. + :param label: The given boot image's label. + :return: Whether the image matches any of the dicts in `filters`. + """ + if filters is None or len(filters) == 0: + return True + for filter_dict in filters: + item_matches = ( + value_passes_filter(filter_dict['os'], os) and + value_passes_filter(filter_dict['release'], release) and + value_passes_filter_list(filter_dict['arches'], arch) and + value_passes_filter_list(filter_dict['subarches'], subarch) and + value_passes_filter_list(filter_dict['labels'], label) + ) + if item_matches: + return True + return False + + +def boot_merge(destination, additions, filters=None): + """Complement one `BootImageMapping` with entries from another. + + This adds entries from `additions` (that match `filters`, if given) to + `destination`, but only for those image specs for which `destination` does + not have entries yet. + + :param destination: `BootImageMapping` to be updated. It will be extended + in-place. + :param additions: A second `BootImageMapping`, which will be used as a + source of additional entries. + :param filters: List of dicts, each of which contains 'os', arch', + 'subarch', 'release', and 'label' keys. If given, entries are only + considered for copying from `additions` to `destination` if they match + at least one of the filters. Entries in the filter may be the string + `*` (or for entries that are lists, may contain the string `*`) to make + them match any value. 
+ """ + for image, resource in additions.items(): + os, arch, subarch, release, label = image + if image_passes_filter( + filters, os, arch, subarch, release, label): + # Do not override an existing entry with the same + # os/arch/subarch/release/label: the first entry found takes + # precedence. + destination.setdefault(image, resource) + + +def download_image_descriptions(path, keyring=None): + """Download image metadata from upstream Simplestreams repo. + + :param path: The path to a Simplestreams repo. + :param keyring: Optional keyring for verifying the repo's signatures. + :return: A `BootImageMapping` describing available boot resources. + """ + mirror, rpath = path_from_mirror_url(path, None) + policy = get_signing_policy(rpath, keyring) + reader = UrlMirrorReader(mirror, policy=policy) + boot_images_dict = BootImageMapping() + dumper = RepoDumper(boot_images_dict) + dumper.sync(reader, rpath) + return boot_images_dict + + +def download_all_image_descriptions(sources): + """Download image metadata for all sources in `config`.""" + boot = BootImageMapping() + for source in sources: + repo_boot = download_image_descriptions( + source['url'], keyring=source.get('keyring', None)) + boot_merge(boot, repo_boot, source['selections']) + return boot diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/download_resources.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/download_resources.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/download_resources.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/download_resources.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,287 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Simplestreams code to download boot resources.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'download_all_boot_resources', + ] + +from datetime import datetime +from gzip import GzipFile +import os.path + +from provisioningserver.import_images.helpers import ( + get_os_from_product, + get_signing_policy, + maaslog, + ) +from provisioningserver.utils.shell import call_and_check +from simplestreams.contentsource import FdContentSource +from simplestreams.mirrors import ( + BasicMirrorWriter, + UrlMirrorReader, + ) +from simplestreams.objectstores import FileStore +from simplestreams.util import ( + item_checksums, + path_from_mirror_url, + products_exdata, + ) + + +DEFAULT_KEYRING_PATH = "/usr/share/keyrings" + + +def insert_file(store, name, tag, checksums, size, content_source): + """Insert a file into `store`. + + :param store: A simplestreams `ObjectStore`. + :param name: Logical name of the file being inserted. Only needs to be + unique within the scope of this boot image. + :param tag: UUID, or "tag," for the file. It will be inserted into + `store` under this name, not its logical name. + :param checksums: A Simplestreams checksums dict, mapping hash algorihm + names (such as `sha256`) to the file's respective checksums as + computed by those hash algorithms. + :param size: Optional size for the file, so Simplestreams knows what size + to expect. + :param content_source: A Simplestreams `ContentSource` for reading the + file. + :return: A list of inserted files (actually, only the one file in this + case) described as tuples of (path, logical name). The path lies in + the directory managed by `store` and has a filename based on `tag`, + not logical name. 
+ """ + maaslog.debug("Inserting file %s (tag=%s, size=%s).", name, tag, size) + store.insert(tag, content_source, checksums, mutable=False, size=size) + # XXX jtv 2014-04-24 bug=1313580: Isn't _fullpath meant to be private? + return [(store._fullpath(tag), name)] + + +def call_uec2roottar(root_image_path, root_tgz_path): + """Invoke `uec2roottar` with the given arguments. + + Here only so tests can stub it out. + + :param root_image_path: Input file. + :param root_tgz_path: Output file. + """ + call_and_check([ + 'sudo', '/usr/bin/uec2roottar', + '--user=maas', + root_image_path, + root_tgz_path, + ]) + + +def insert_root_image(store, tag, checksums, size, content_source): + """Insert a root image into `store`. + + This may involve converting a UEC boot image into a root tarball. + + :param store: A simplestreams `ObjectStore`. + :param tag: UUID, or "tag," for the file root image file. The root image + and root tarball will both be stored in the cache directory under + names derived from this tag. + :param checksums: A Simplestreams checksums dict, mapping hash algorihm + names (such as `sha256`) to the file's respective checksums as + computed by those hash algorithms. + :param size: Optional size for the file, so Simplestreams knows what size + to expect. + :param content_source: A Simplestreams `ContentSource` for reading the + file. + :return: A list of inserted files (root image and root tarball) described + as tuples of (path, logical name). The path lies in the directory + managed by `store` and has a filename based on `tag`, not logical name. + """ + maaslog.debug("Inserting root image (tag=%s, size=%s).", tag, size) + root_image_tag = 'root-image-%s' % tag + # XXX jtv 2014-04-24 bug=1313580: Isn't _fullpath meant to be private? 
+ root_image_path = store._fullpath(root_image_tag) + root_tgz_tag = 'root-tgz-%s' % tag + root_tgz_path = store._fullpath(root_tgz_tag) + if not os.path.isfile(root_image_path): + maaslog.debug("New root image: %s.", root_image_path) + store.insert(tag, content_source, checksums, mutable=False, size=size) + uncompressed = FdContentSource(GzipFile(store._fullpath(tag))) + store.insert(root_image_tag, uncompressed, mutable=False) + store.remove(tag) + if not os.path.isfile(root_tgz_path): + maaslog.debug("Converting root tarball: %s.", root_tgz_path) + call_uec2roottar(root_image_path, root_tgz_path) + return [(root_image_path, 'root-image'), (root_tgz_path, 'root-tgz')] + + +def link_resources(snapshot_path, links, osystem, arch, release, label, + subarches): + """Hardlink entries in the snapshot directory to resources in the cache. + + This creates file entries in the snapshot directory for boot resources + that are part of a single boot image. + + :param snapshot_path: Snapshot directory. + :param links: A list of links that should be created to files stored in + the cache. Each link is described as a tuple of (path, logical + name). The path points to a file in the cache directory. The logical + name will be link's filename, without path. + :param osystem: Operating system with this boot image supports. + :param arch: Architecture which this boot image supports. + :param release: OS release of which this boot image is a part. + :param label: OS release label of which this boot image is a part, e.g. + `release` or `rc`. + :param subarches: A list of sub-architectures which this boot image + supports. For example, a kernel for one Ubuntu release for a given + architecture and subarchitecture `generic` will typically also support + the `hwe-*` subarchitectures that denote hardware-enablement kernels + for older Ubuntu releases. 
+ """ + for subarch in subarches: + directory = os.path.join( + snapshot_path, osystem, arch, subarch, release, label) + if not os.path.exists(directory): + os.makedirs(directory) + for cached_file, logical_name in links: + link_path = os.path.join(directory, logical_name) + if os.path.isfile(link_path): + os.remove(link_path) + os.link(cached_file, link_path) + + +class RepoWriter(BasicMirrorWriter): + """Download boot resources from an upstream Simplestreams repo. + + :ivar root_path: Snapshot directory. + :ivar store: A simplestreams `ObjectStore` where downloaded resources + should be stored. + :ivar product_mapping: A `ProductMapping` describing the desired boot + resources. + """ + + def __init__(self, root_path, store, product_mapping): + self.root_path = root_path + self.store = store + self.product_mapping = product_mapping + super(RepoWriter, self).__init__(config={ + # Only download the latest version. Without this all versions + # will be downloaded from simplestreams. + 'max_items': 1, + }) + + def load_products(self, path=None, content_id=None): + """Overridable from `BasicMirrorWriter`.""" + # It looks as if this method only makes sense for MirrorReaders, not + # for MirrorWriters. The default MirrorWriter implementation just + # raises NotImplementedError. Stop it from doing that. 
+ return + + def filter_version(self, data, src, target, pedigree): + """Overridable from `BasicMirrorWriter`.""" + return self.product_mapping.contains(products_exdata(src, pedigree)) + + def insert_item(self, data, src, target, pedigree, contentsource): + """Overridable from `BasicMirrorWriter`.""" + item = products_exdata(src, pedigree) + checksums = item_checksums(data) + tag = checksums['sha256'] + size = data['size'] + ftype = item['ftype'] + if ftype == 'root-image.gz': + links = insert_root_image( + self.store, tag, checksums, size, contentsource) + else: + links = insert_file( + self.store, ftype, tag, checksums, size, contentsource) + + os = get_os_from_product(item) + subarches = self.product_mapping.get(item) + link_resources( + snapshot_path=self.root_path, links=links, + osystem=os, arch=item['arch'], release=item['release'], + label=item['label'], subarches=subarches) + + +def download_boot_resources(path, store, snapshot_path, product_mapping, + keyring_file=None): + """Download boot resources for one simplestreams source. + + :param path: The Simplestreams URL for this source. + :param store: A simplestreams `ObjectStore` where downloaded resources + should be stored. + :param snapshot_path: Filesystem path to a snapshot of current upstream + boot resources. + :param product_mapping: A `ProductMapping` describing the resources to be + downloaded. + :param keyring_file: Optional path to a keyring file for verifying + signatures. + """ + writer = RepoWriter(snapshot_path, store, product_mapping) + (mirror, rpath) = path_from_mirror_url(path, None) + policy = get_signing_policy(rpath, keyring_file) + reader = UrlMirrorReader(mirror, policy=policy) + writer.sync(reader, rpath) + + +def compose_snapshot_path(storage_path): + """Put together a path for a new snapshot. + + A snapshot is a directory in `storage_path` containing boot resources. + The snapshot's name contains the date in a sortable format. 
+ + :param storage_path: Root storage directory, + usually `/var/lib/maas/boot-resources`. + :return: Path to the snapshot directory. + """ + now = datetime.utcnow() + snapshot_name = 'snapshot-%s' % now.strftime('%Y%m%d-%H%M%S') + return os.path.join(storage_path, snapshot_name) + + +def download_all_boot_resources( + sources, storage_path, product_mapping, store=None): + """Download the actual boot resources. + + Local copies of boot resources are downloaded into a "cache" directory. + This is a raw, flat store of resources, with UUID-based filenames called + "tags." + + In addition, the downlads are hardlinked into a "snapshot directory." This + directory, named after the date and time that the snapshot was initiated, + reflects the currently available boot resources in a proper directory + hierarchy with subdirectories for architectures, releases, and so on. + + :param sources: List of dicts describing the Simplestreams sources from + which we should download. + :param storage_path: Root storage directory, + usually `/var/lib/maas/boot-resources`. + :param snapshot_path: + :param product_mapping: A `ProductMapping` describing the resources to be + downloaded. + :param store: A `FileStore` instance. Used only for testing. + :return: Path to the snapshot directory. + """ + storage_path = os.path.abspath(storage_path) + snapshot_path = compose_snapshot_path(storage_path) + # Use a FileStore as our ObjectStore implementation. It will write to the + # cache directory. + if store is None: + cache_path = os.path.join(storage_path, 'cache') + store = FileStore(cache_path) + # XXX jtv 2014-04-11: FileStore now also takes an argument called + # complete_callback, which can be used for progress reporting. 
+ + for source in sources: + download_boot_resources( + source['url'], store, snapshot_path, product_mapping, + keyring_file=source.get('keyring')), + + return snapshot_path diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/helpers.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/helpers.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/helpers.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/helpers.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,78 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Miscellaneous small definitions in support of boot-resource import.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'get_os_from_product', + 'get_signing_policy', + 'ImageSpec', + 'maaslog', + ] + +from collections import namedtuple +import functools + +from provisioningserver.logger import get_maas_logger +from simplestreams.util import policy_read_signed + +# A tuple of the items that together select a boot image. +ImageSpec = namedtuple(b'ImageSpec', [ + 'os', + 'arch', + 'subarch', + 'release', + 'label', + ]) + + +def get_signing_policy(path, keyring=None): + """Return Simplestreams signing policy for the given path. + + :param path: Path to the Simplestreams index file. + :param keyring: Optional keyring file for verifying signatures. + :return: A "signing policy" callable. It accepts a file's content, path, + and optional keyring as arguments, and if the signature verifies + correctly, returns the content. The keyring defaults to the one you + pass. + """ + if path.endswith('.json'): + # The configuration deliberately selected an un-signed index. A signed + # index would have a suffix of '.sjson'. Use a policy that doesn't + # check anything. 
+ policy = lambda content, path, keyring: content + else: + # Otherwise: use default Simplestreams policy for verifying signatures. + policy = policy_read_signed + + if keyring is not None: + # Pass keyring to the policy, to use if the caller inside Simplestreams + # does not provide one. + policy = functools.partial(policy, keyring=keyring) + + return policy + + +def get_os_from_product(item): + """Returns the operating system that the product is refering to. + + Originally products did not contain the os field. This handles that missing + field, by returning "ubuntu" as the operating system. Before the os field + was added to the product mapping, only Ubuntu was supported. + """ + try: + return item['os'] + except KeyError: + return "ubuntu" + + +maaslog = get_maas_logger("import-images") diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/keyrings.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/keyrings.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/keyrings.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/keyrings.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,70 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). +"""Keyring management functions for the import boot images job and script.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'write_all_keyrings', + ] + +import os +from urlparse import urlsplit + +from provisioningserver.import_images.helpers import maaslog + + +def write_keyring(keyring_path, keyring_data): + """Write a keyring blob to a file. + + :param path: The path to the keyring file. + :param keyring_data: The data to write to the keyring_file, as a + base64-encoded string. 
+ """ + maaslog.debug("Writing keyring %s to disk.", keyring_path) + with open(keyring_path, 'wb') as keyring_file: + keyring_file.write(keyring_data) + + +def calculate_keyring_name(source_url): + """Return a name for a keyring based on a URL.""" + split_url = urlsplit(source_url) + cleaned_path = split_url.path.strip('/').replace('/', '-') + keyring_name = "%s-%s.gpg" % (split_url.netloc, cleaned_path) + return keyring_name + + +def write_all_keyrings(directory, sources): + """For a given set of `sources`, write the keyrings to disk. + + :param directory: A directory where the key files should be written. Use + a dedicated temporary directory for this, and clean it up when done. + :param sources: An iterable of the sources whose keyrings need to be + written. + :return: The sources iterable, with each source whose keyring has + been written now having a "keyring" value set, pointing to the file + on disk. + """ + for source in sources: + source_url = source.get('url') + keyring_file = source.get('keyring') + keyring_data = source.get('keyring_data') + + if keyring_file is not None and keyring_data is not None: + maaslog.warning( + "Both a keyring file and keyring data were specified; " + "ignoring the keyring file.") + + if keyring_data is not None: + keyring_file = os.path.join( + directory, calculate_keyring_name(source_url)) + write_keyring(keyring_file, keyring_data) + source['keyring'] = keyring_file + return sources diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/product_mapping.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/product_mapping.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/product_mapping.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/product_mapping.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,90 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""The `ProductMapping` class.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'map_products', + ] + + +class ProductMapping: + """Mapping of product data. + + Maps a combination of boot resource metadata (`content_id`, `product_name`, + `version_name`) to a list of subarchitectures supported by that boot + resource. + """ + + def __init__(self): + self.mapping = {} + + @staticmethod + def make_key(resource): + """Extract a key tuple from `resource`. + + The key is used for indexing `mapping`. + + :param resource: A dict describing a boot resource. It must contain + the keys `content_id`, `product_name`, and `version_name`. + :return: A tuple of the resource's content ID, product name, and + version name. + """ + return ( + resource['content_id'], + resource['product_name'], + resource['version_name'], + ) + + def add(self, resource, subarch): + """Add `subarch` to the list of subarches supported by a boot resource. + + The `resource` is a dict as returned by `products_exdata`. The method + will use the values identified by keys `content_id`, `product_name`, + and `version_name`. + """ + key = self.make_key(resource) + self.mapping.setdefault(key, []) + self.mapping[key].append(subarch) + + def contains(self, resource): + """Does the dict contain a mapping for the given resource?""" + return self.make_key(resource) in self.mapping + + def get(self, resource): + """Return the mapped subarchitectures for `resource`.""" + return self.mapping[self.make_key(resource)] + + +def map_products(image_descriptions): + """Determine the subarches supported by each boot resource. + + Many subarches may be deployed by a single boot resource. We note only + subarchitectures here and ignore architectures because the metadata format + tightly couples a boot resource to its architecture. 
+ + We can figure out for which architecture we need to use a specific boot + resource by looking at its description in the metadata. We can't do the + same with subarch, because we may want to use a boot resource only for a + specific subset of subarches. + + This function returns the relationship between boot resources and + subarchitectures as a `ProductMapping`. + + :param image_descriptions: A `BootImageMapping` containing the images' + metadata. + :return: A `ProductMapping` mapping products to subarchitectures. + """ + mapping = ProductMapping() + for image, boot_resource in image_descriptions.items(): + mapping.add(boot_resource, image.subarch) + return mapping diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/testing/factory.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/testing/factory.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/testing/factory.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/testing/factory.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,78 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Factory helpers for the `import_images` package.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'make_boot_resource', + 'make_image_spec', + 'make_maas_meta', + 'make_maas_meta_without_os', + 'set_resource', + ] + +from textwrap import dedent + +from maastesting.factory import factory +from provisioningserver.import_images.boot_image_mapping import ( + BootImageMapping, + ) +from provisioningserver.import_images.helpers import ImageSpec + + +def make_maas_meta(): + """Return fake maas.meta data.""" + return dedent("""\ + {"ubuntu": {"amd64": {"generic": {"precise": {"release": {"content_id": "com.ubuntu.maas:v2:download", "path": "precise/amd64/20140410/raring/generic/boot-kernel", "product_name": "com.ubuntu.maas:v2:boot:12.04:amd64:hwe-r", "subarches": "generic,hwe-p,hwe-q,hwe-r", "version_name": "20140410"}}, "trusty": {"release": {"content_id": "com.ubuntu.maas:v2:download", "path": "trusty/amd64/20140416.1/root-image.gz", "product_name": "com.ubuntu.maas:v2:boot:14.04:amd64:hwe-t", "subarches": "generic,hwe-p,hwe-q,hwe-r,hwe-s,hwe-t", "version_name": "20140416.1"}}}, "hwe-s": {"precise": {"release": {"content_id": "com.ubuntu.maas:v2:download", "path": "precise/amd64/20140410/saucy/generic/boot-kernel", "product_name": "com.ubuntu.maas:v2:boot:12.04:amd64:hwe-s", "subarches": "generic,hwe-p,hwe-q,hwe-r,hwe-s", "version_name": "20140410"}}}}}}""") # NOQA + + +def make_maas_meta_without_os(): + """Return fake maas.meta data, without the os field.""" + return dedent("""\ + {"amd64": {"generic": {"precise": {"release": {"content_id": "com.ubuntu.maas:v2:download", "path": "precise/amd64/20140410/raring/generic/boot-kernel", "product_name": "com.ubuntu.maas:v2:boot:12.04:amd64:hwe-r", "subarches": "generic,hwe-p,hwe-q,hwe-r", "version_name": "20140410"}}, "trusty": {"release": {"content_id": "com.ubuntu.maas:v2:download", "path": 
"trusty/amd64/20140416.1/root-image.gz", "product_name": "com.ubuntu.maas:v2:boot:14.04:amd64:hwe-t", "subarches": "generic,hwe-p,hwe-q,hwe-r,hwe-s,hwe-t", "version_name": "20140416.1"}}}, "hwe-s": {"precise": {"release": {"content_id": "com.ubuntu.maas:v2:download", "path": "precise/amd64/20140410/saucy/generic/boot-kernel", "product_name": "com.ubuntu.maas:v2:boot:12.04:amd64:hwe-s", "subarches": "generic,hwe-p,hwe-q,hwe-r,hwe-s", "version_name": "20140410"}}}}}""") # NOQA + + +def make_boot_resource(): + """Create a fake resource dict.""" + return { + 'content_id': factory.make_name('content_id'), + 'product_name': factory.make_name('product_name'), + 'version_name': factory.make_name('version_name'), + } + + +def make_image_spec(os=None, arch=None, subarch=None, release=None, + label=None): + """Return an `ImageSpec` with random values.""" + if os is None: + os = factory.make_name('os') + if arch is None: + arch = factory.make_name('arch') + if subarch is None: + subarch = factory.make_name('subarch') + if release is None: + release = factory.make_name('release') + if label is None: + label = factory.make_name('label') + return ImageSpec(os, arch, subarch, release, label) + + +def set_resource(boot_dict=None, image_spec=None, resource=None): + """Add boot resource to a `BootImageMapping`, creating it if necessary.""" + if boot_dict is None: + boot_dict = BootImageMapping() + if image_spec is None: + image_spec = make_image_spec() + if resource is None: + resource = factory.make_name('boot-resource') + boot_dict.mapping[image_spec] = resource + return boot_dict diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_boot_image_mapping.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_boot_image_mapping.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_boot_image_mapping.py 1970-01-01 00:00:00.000000000 +0000 +++ 
maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_boot_image_mapping.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,150 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for `BootImageMapping` and its module.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import json + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from provisioningserver.import_images.boot_image_mapping import ( + BootImageMapping, + ) +from provisioningserver.import_images.testing.factory import ( + make_image_spec, + make_maas_meta, + make_maas_meta_without_os, + set_resource, + ) + + +class TestBootImageMapping(MAASTestCase): + """Tests for `BootImageMapping`.""" + + def test_initially_empty(self): + self.assertItemsEqual([], BootImageMapping().items()) + + def test_items_returns_items(self): + image = make_image_spec() + resource = factory.make_name('resource') + image_dict = set_resource(image_spec=image, resource=resource) + self.assertItemsEqual([(image, resource)], image_dict.items()) + + def test_is_empty_returns_True_if_empty(self): + self.assertTrue(BootImageMapping().is_empty()) + + def test_is_empty_returns_False_if_not_empty(self): + mapping = BootImageMapping() + mapping.setdefault(make_image_spec(), factory.make_name('resource')) + self.assertFalse(mapping.is_empty()) + + def test_setdefault_sets_unset_item(self): + image_dict = BootImageMapping() + image = make_image_spec() + resource = factory.make_name('resource') + image_dict.setdefault(image, resource) + self.assertItemsEqual([(image, resource)], image_dict.items()) + + def test_setdefault_leaves_set_item_unchanged(self): + image = make_image_spec() + old_resource = factory.make_name('resource') + image_dict = set_resource(image_spec=image, resource=old_resource) + 
image_dict.setdefault(image, factory.make_name('newresource')) + self.assertItemsEqual([(image, old_resource)], image_dict.items()) + + def test_set_overwrites_item(self): + image_dict = BootImageMapping() + image = make_image_spec() + resource = factory.make_name('resource') + image_dict.setdefault(image, factory.make_name('resource')) + image_dict.set(image, resource) + self.assertItemsEqual([(image, resource)], image_dict.items()) + + def test_dump_json_is_consistent(self): + image = make_image_spec() + resource = factory.make_name('resource') + image_dict_1 = set_resource(image_spec=image, resource=resource) + image_dict_2 = set_resource(image_spec=image, resource=resource) + self.assertEqual(image_dict_1.dump_json(), image_dict_2.dump_json()) + + def test_dump_json_represents_empty_dict_as_empty_object(self): + self.assertEqual('{}', BootImageMapping().dump_json()) + + def test_dump_json_represents_entry(self): + image = make_image_spec() + resource = factory.make_name('resource') + image_dict = set_resource(image_spec=image, resource=resource) + self.assertEqual( + { + image.os: { + image.arch: { + image.subarch: { + image.release: {image.label: resource}, + }, + }, + }, + }, + json.loads(image_dict.dump_json())) + + def test_dump_json_combines_similar_entries(self): + image = make_image_spec() + other_release = factory.make_name('other-release') + resource1 = factory.make_name('resource') + resource2 = factory.make_name('other-resource') + image_dict = BootImageMapping() + set_resource(image_dict, image, resource1) + set_resource( + image_dict, image._replace(release=other_release), resource2) + self.assertEqual( + { + image.os: { + image.arch: { + image.subarch: { + image.release: {image.label: resource1}, + other_release: {image.label: resource2}, + }, + }, + }, + }, + json.loads(image_dict.dump_json())) + + def test_load_json_result_matches_dump_of_own_data(self): + # Loading the test data and dumping it again should result in + # identical test data. 
+ test_meta_file_content = make_maas_meta() + mapping = BootImageMapping.load_json(test_meta_file_content) + dumped = mapping.dump_json() + self.assertEqual(test_meta_file_content, dumped) + + def test_load_json_result_of_old_data_uses_ubuntu_as_os(self): + test_meta_file_content = make_maas_meta_without_os() + mapping = BootImageMapping.load_json(test_meta_file_content) + os = {image.os for image, _ in mapping.items()}.pop() + self.assertEqual('ubuntu', os) + + def test_load_json_returns_empty_mapping_for_invalid_json(self): + bad_json = "" + mapping = BootImageMapping.load_json(bad_json) + self.assertEqual({}, mapping.mapping) + + def test_get_image_arches_gets_arches_from_imagespecs(self): + expected_arches = set() + mapping = None + for _ in range(0, 3): + image_spec = make_image_spec() + resource = factory.make_name('resource') + expected_arches.add(image_spec.arch) + mapping = set_resource(mapping, image_spec, resource) + + self.assertEqual(expected_arches, mapping.get_image_arches()) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_boot_resources.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_boot_resources.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_boot_resources.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_boot_resources.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,269 +14,487 @@ __metaclass__ = type __all__ = [] +from datetime import ( + datetime, + timedelta, + ) import errno +import hashlib +import json import os from random import randint +from subprocess import ( + PIPE, + Popen, + ) from maastesting.factory import factory +from maastesting.matchers import ( + MockAnyCall, + MockCalledWith, + ) from maastesting.testcase import MAASTestCase -from mock import MagicMock -from provisioningserver.config import BootConfig +from maastesting.utils import age_file +import mock +from provisioningserver.boot import 
BootMethodRegistry +import provisioningserver.config +from provisioningserver.config import BootSources from provisioningserver.import_images import boot_resources +from provisioningserver.import_images.boot_image_mapping import ( + BootImageMapping, + ) +from provisioningserver.import_images.testing.factory import make_image_spec +from provisioningserver.testing.config import BootSourcesFixture +from provisioningserver.utils.fs import write_text_file +from testtools.content import Content +from testtools.content_type import UTF8_TEXT +from testtools.matchers import ( + DirExists, + FileExists, + ) +import yaml + + +class TestTgtEntry(MAASTestCase): + """Tests for `tgt_entry`.""" + + def test_generates_one_target(self): + spec = make_image_spec() + osystem = factory.make_name('osystem') + image = self.make_file() + entry = boot_resources.tgt_entry( + osystem, spec.arch, spec.subarch, spec.release, spec.label, image) + # The entry looks a bit like XML, but isn't well-formed. So don't try + # to parse it as such! + self.assertIn('')) + + def test_produces_suitable_output_for_tgt_admin(self): + spec = make_image_spec() + image = self.make_file() + osystem = factory.make_name('osystem') + entry = boot_resources.tgt_entry( + osystem, spec.arch, spec.subarch, spec.release, spec.label, image) + config = self.make_file(contents=entry) + # Pretend to be root, but without requiring the actual privileges and + # without prompting for a password. In that state, run tgt-admin. + # It has to think it's root, even for a "pretend" run. + # Make it read the config we just produced, and pretend to update its + # iSCSI targets based on what it finds in the config. + # + # The only real test is that this succeed. 
+ cmd = Popen( + [ + 'fakeroot', 'tgt-admin', + '--conf', config, + '--pretend', + '--update', 'ALL', + ], + stdout=PIPE, stderr=PIPE) + stdout, stderr = cmd.communicate() + self.addDetail('tgt-stderr', Content(UTF8_TEXT, lambda: [stderr])) + self.addDetail('tgt-stdout', Content(UTF8_TEXT, lambda: [stdout])) + self.assertEqual(0, cmd.returncode) + + +def checksum_sha256(data): + """Return the SHA256 checksum for `data`, as a hex string.""" + assert isinstance(data, bytes) + summer = hashlib.sha256() + summer.update(data) + return summer.hexdigest() -def make_image_spec(): - """Return an `ImageSpec` with random values.""" - return boot_resources.ImageSpec( - factory.make_name('arch'), - factory.make_name('subarch'), - factory.make_name('release'), - factory.make_name('label'), - ) - - -class TestIterateBootResources(MAASTestCase): - """Tests for `iterate_boot_resources`.""" - - def test_empty_hierarchy_yields_nothing(self): - self.assertItemsEqual( - [], - boot_resources.iterate_boot_resources( - boot_resources.create_empty_hierarchy())) - - def test_finds_boot_resource(self): - image_spec = make_image_spec() - arch, subarch, release, label = image_spec - self.assertItemsEqual( - [image_spec], - boot_resources.iterate_boot_resources( - {arch: {subarch: {release: {label: factory.make_name()}}}})) - - -class TestValuePassesFilterList(MAASTestCase): - """Tests for `value_passes_filter_list`.""" - - def test_nothing_passes_empty_list(self): - self.assertFalse( - boot_resources.value_passes_filter_list( - [], factory.make_name('value'))) - - def test_unmatched_value_does_not_pass(self): - self.assertFalse( - boot_resources.value_passes_filter_list( - [factory.make_name('filter')], factory.make_name('value'))) - - def test_matched_value_passes(self): - value = factory.make_name('value') - self.assertTrue( - boot_resources.value_passes_filter_list([value], value)) - - def test_value_passes_if_matched_anywhere_in_filter(self): - value = factory.make_name('value') - 
self.assertTrue( - boot_resources.value_passes_filter_list( - [ - factory.make_name('filter'), - value, - factory.make_name('filter'), - ], - value)) - - def test_any_value_passes_asterisk(self): - self.assertTrue( - boot_resources.value_passes_filter_list( - ['*'], factory.make_name('value'))) - - -class TestValuePassesFilter(MAASTestCase): - """Tests for `value_passes_filter`.""" - - def test_unmatched_value_does_not_pass(self): - self.assertFalse( - boot_resources.value_passes_filter( - factory.make_name('filter'), factory.make_name('value'))) - - def test_matching_value_passes(self): - value = factory.make_name('value') - self.assertTrue(boot_resources.value_passes_filter(value, value)) - - def test_any_value_matches_asterisk(self): - self.assertTrue( - boot_resources.value_passes_filter( - '*', factory.make_name('value'))) - +class TestMain(MAASTestCase): -class TestImagePassesFilter(MAASTestCase): - """Tests for `image_passes_filter`.""" + def setUp(self): + super(TestMain, self).setUp() + self.storage = self.make_dir() + self.patch( + provisioningserver.config, 'BOOT_RESOURCES_STORAGE', self.storage) + # Forcing arch to amd64 causes pxelinux.0 to be installed, giving more + # test coverage. + self.image = make_image_spec(arch='amd64') + self.os, self.arch, self.subarch, \ + self.release, self.label = self.image + self.repo = self.make_simplestreams_repo(self.image) + + def patch_maaslog(self): + """Suppress log output from the import code.""" + self.patch(boot_resources, 'maaslog') + + def make_args(self, sources="", **kwargs): + """Fake an `argumentparser` parse result.""" + args = mock.Mock() + # Set sources explicitly, otherwise boot_resources.main() gets + # confused. + args.sources = sources + for key, value in kwargs.items(): + setattr(args, key, value) + return args + + def make_simplestreams_index(self, index_dir, stream, product): + """Write a fake simplestreams index file. 
Return its path.""" + index_file = os.path.join(index_dir, 'index.json') + index = { + 'format': 'index:1.0', + 'updated': 'Tue, 25 Mar 2014 16:19:49 +0000', + 'index': { + stream: { + 'datatype': 'image-ids', + 'path': 'streams/v1/%s.json' % stream, + 'updated': 'Tue, 25 Mar 2014 16:19:49 +0000', + 'format': 'products:1.0', + 'products': [product], + }, + }, + } + write_text_file(index_file, json.dumps(index)) + return index_file - def make_filter_from_image(self, image_spec=None): - """Create a filter dict that matches the given `ImageSpec`. + def make_download_file(self, repo, image_spec, version, + filename='boot-kernel'): + """Fake a downloadable file in `repo`. - If `image_spec` is not given, creates a random value. + Return the new file's POSIX path, and its contents. """ - if image_spec is None: - image_spec = make_image_spec() - return { - 'arches': [image_spec.arch], - 'subarches': [image_spec.subarch], - 'release': image_spec.release, - 'labels': [image_spec.label], - } - - def test_any_image_passes_none_filter(self): - arch, subarch, release, label = make_image_spec() - self.assertTrue( - boot_resources.image_passes_filter( - None, arch, subarch, release, label)) - - def test_any_image_passes_empty_filter(self): - arch, subarch, release, label = make_image_spec() - self.assertTrue( - boot_resources.image_passes_filter( - [], arch, subarch, release, label)) - - def test_image_passes_matching_filter(self): - image = make_image_spec() - self.assertTrue( - boot_resources.image_passes_filter( - [self.make_filter_from_image(image)], - image.arch, image.subarch, image.release, image.label)) - - def test_image_does_not_pass_nonmatching_filter(self): - image = make_image_spec() - self.assertFalse( - boot_resources.image_passes_filter( - [self.make_filter_from_image()], - image.arch, image.subarch, image.release, image.label)) - - def test_image_passes_if_one_filter_matches(self): - image = make_image_spec() - self.assertTrue( - 
boot_resources.image_passes_filter( - [ - self.make_filter_from_image(), - self.make_filter_from_image(image), - self.make_filter_from_image(), - ], image.arch, image.subarch, image.release, image.label)) - - def test_filter_checks_release(self): - image = make_image_spec() - self.assertFalse( - boot_resources.image_passes_filter( - [ - self.make_filter_from_image(image._replace( - release=factory.make_name('other-release'))) - ], image.arch, image.subarch, image.release, image.label)) - - def test_filter_checks_arches(self): - image = make_image_spec() - self.assertFalse( - boot_resources.image_passes_filter( - [ - self.make_filter_from_image(image._replace( - arch=factory.make_name('other-arch'))) - ], image.arch, image.subarch, image.release, image.label)) - - def test_filter_checks_subarches(self): - image = make_image_spec() - self.assertFalse( - boot_resources.image_passes_filter( - [ - self.make_filter_from_image(image._replace( - subarch=factory.make_name('other-subarch'))) - ], image.arch, image.subarch, image.release, image.label)) - - def test_filter_checks_labels(self): - image = make_image_spec() - self.assertFalse( - boot_resources.image_passes_filter( - [ - self.make_filter_from_image(image._replace( - label=factory.make_name('other-label'))) - ], image.arch, image.subarch, image.release, image.label)) - - -class TestBootMerge(MAASTestCase): - """Tests for `boot_merge`.""" - - def make_resource(self, boot_dict=None, image_spec=None, resource=None): - """Add a boot resource to `boot_dict`, creating it if necessary.""" - if boot_dict is None: - boot_dict = {} - if image_spec is None: - image_spec = make_image_spec() - if resource is None: - resource = factory.make_name('boot-resource') - arch, subarch, release, label = image_spec - # Drill down into the dict; along the way, create any missing levels of - # nested dicts. 
- nested_dict = boot_dict - for level in (arch, subarch, release): - nested_dict.setdefault(level, {}) - nested_dict = nested_dict[level] - # At the bottom level, indexed by "label," insert "resource" as the - # value. - nested_dict[label] = resource - return boot_dict - - def test_integrates(self): - # End-to-end scenario for boot_merge: start with an empty boot - # resources dict, and receive one resource from Simplestreams. - total_resources = boot_resources.create_empty_hierarchy() - resources_from_repo = self.make_resource() - boot_resources.boot_merge(total_resources, resources_from_repo.copy()) - # Since we started with an empty dict, the result contains the same - # item that we got from Simplestreams, and nothing else. - self.assertEqual(resources_from_repo, total_resources) + path = [ + image_spec.release, + image_spec.arch, + version, + image_spec.release, + image_spec.subarch, + filename, + ] + native_path = os.path.join(repo, *path) + os.makedirs(os.path.dirname(native_path)) + contents = ("Contents: %s" % filename).encode('utf-8') + write_text_file(native_path, contents) + # Return POSIX path for inclusion in Simplestreams data, not + # system-native path for filesystem access. + return '/'.join(path), contents + + def make_simplestreams_product_index(self, index_dir, stream, product, + image_spec, os_release, + download_file, contents, version): + """Write a fake Simplestreams product index file. - def test_obeys_filters(self): - filters = [ + The image is written into the directory that holds the indexes. It + contains one downloadable file, as specified by the arguments. 
+ """ + index = { + 'format': 'products:1.0', + 'data-type': 'image-ids', + 'updated': 'Tue, 25 Mar 2014 16:19:49 +0000', + 'content_id': stream, + 'products': { + product: { + 'versions': { + version: { + 'items': { + 'boot-kernel': { + 'ftype': 'boot-kernel', + '_fake': 'fake-data: %s' % download_file, + 'version': os_release, + 'release': image_spec.release, + 'path': download_file, + 'sha256': checksum_sha256(contents), + 'arch': image_spec.arch, + 'subarches': image_spec.subarch, + 'size': len(contents), + }, + }, + }, + }, + 'subarch': image_spec.subarch, + 'krel': image_spec.release, + 'label': image_spec.label, + 'kflavor': image_spec.subarch, + 'version': os_release, + 'subarches': [image_spec.subarch], + 'release': image_spec.release, + 'arch': image_spec.arch, + 'os': image_spec.os, + }, + }, + } + write_text_file( + os.path.join(index_dir, '%s.json' % stream), + json.dumps(index)) + + def make_simplestreams_repo(self, image_spec): + """Fake a local simplestreams repository containing the given image. + + This creates a temporary directory that looks like a realistic + Simplestreams repository, containing one downloadable file for the + given `image_spec`. + """ + os_release = '%d.%.2s' % ( + randint(1, 99), + ('04' if randint(0, 1) == 0 else '10'), + ) + repo = self.make_dir() + index_dir = os.path.join(repo, 'streams', 'v1') + os.makedirs(index_dir) + stream = 'com.ubuntu.maas:daily:v2:download' + product = 'com.ubuntu.maas:boot:%s:%s:%s' % ( + os_release, + image_spec.arch, + image_spec.subarch, + ) + version = '20140317' + download_file, sha = self.make_download_file(repo, image_spec, version) + self.make_simplestreams_product_index( + index_dir, stream, product, image_spec, os_release, download_file, + sha, version) + index = self.make_simplestreams_index(index_dir, stream, product) + return index + + def make_working_args(self): + """Create a set of working arguments for the script.""" + # Prepare a fake repository and sources. 
+ sources = [ { - 'arches': [factory.make_name('other-arch')], - 'subarches': [factory.make_name('other-subarch')], - 'release': factory.make_name('other-release'), - 'label': [factory.make_name('other-label')], + 'url': self.repo, + 'selections': [ + { + 'os': self.os, + 'release': self.release, + 'arches': [self.arch], + 'subarches': [self.subarch], + 'labels': [self.label], + }, + ], }, ] - total_resources = boot_resources.create_empty_hierarchy() - resources_from_repo = self.make_resource() - boot_resources.boot_merge( - total_resources, resources_from_repo, filters=filters) - self.assertEqual({}, total_resources) - - def test_does_not_overwrite_existing_entry(self): - image = make_image_spec() - original_resources = self.make_resource( - resource="Original resource", image_spec=image) - total_resources = original_resources.copy() - resources_from_repo = self.make_resource( - resource="New resource", image_spec=image) - boot_resources.boot_merge(total_resources, resources_from_repo.copy()) - self.assertEqual(original_resources, total_resources) + sources_file = self.make_file( + 'sources.yaml', contents=yaml.safe_dump(sources)) + return self.make_args(sources_file=sources_file) + def test_successful_run(self): + """Integration-test a successful run of the importer. -class TestMain(MAASTestCase): - - def test_raises_ioerror_when_no_config_file_found(self): - # Suppress log output. - self.logger = self.patch(boot_resources, 'logger') - filename = "/tmp/%s" % factory.make_name("config") - self.assertFalse(os.path.exists(filename)) - args = MagicMock() - args.config_file = filename + This runs as much realistic code as it can, exercising most of the + integration points for a real import. + """ + # Patch out things that we don't want running during the test. Patch + # at a low level, so that we exercise all the function calls that a + # unit test might not put to the test. 
+ self.patch_maaslog()
+ self.patch(boot_resources, 'call_and_check')
+
+ # We'll go through installation of a PXE boot loader here, but skip
+ # all other boot loader types. Testing them all is a job for proper
+ # unit tests.
+ for method_name, boot_method in BootMethodRegistry:
+ if method_name != 'pxe':
+ self.patch(boot_method, 'install_bootloader')
+
+ args = self.make_working_args()
+ osystem = self.os
+ arch = self.arch
+ subarch = self.subarch
+ release = self.release
+ label = self.label
+
+ # Run the import code.
+ boot_resources.main(args)
+
+ # Verify the results.
+ self.assertThat(os.path.join(self.storage, 'cache'), DirExists())
+ current = os.path.join(self.storage, 'current')
+ self.assertTrue(os.path.islink(current))
+ self.assertThat(current, DirExists())
+ self.assertThat(os.path.join(current, 'pxelinux.0'), FileExists())
+ self.assertThat(os.path.join(current, 'maas.meta'), FileExists())
+ self.assertThat(os.path.join(current, 'maas.tgt'), FileExists())
+ self.assertThat(
+ os.path.join(
+ current, osystem, arch, subarch, self.release, self.label),
+ DirExists())
+
+ # Verify the contents of the "meta" file. 
+ with open(os.path.join(current, 'maas.meta'), 'rb') as meta_file: + meta_data = json.load(meta_file) + self.assertEqual([osystem], meta_data.keys()) + self.assertEqual([arch], meta_data[osystem].keys()) + self.assertEqual([subarch], meta_data[osystem][arch].keys()) + self.assertEqual([release], meta_data[osystem][arch][subarch].keys()) + self.assertEqual( + [label], meta_data[osystem][arch][subarch][release].keys()) + self.assertItemsEqual( + [ + 'content_id', + 'path', + 'product_name', + 'version_name', + 'subarches', + ], + meta_data[osystem][arch][subarch][release][label].keys()) + + def test_warns_if_no_sources_selected(self): + self.patch_maaslog() + sources_fixture = self.useFixture(BootSourcesFixture([])) + args = self.make_args(sources_file=sources_fixture.filename) + + boot_resources.main(args) + + self.assertThat( + boot_resources.maaslog.warn, + MockAnyCall("Can't import: region did not provide a source.")) + + def test_warns_if_no_boot_resources_found(self): + # The import code used to crash when no resources were found in the + # Simplestreams repositories (bug 1305758). This could happen easily + # with mistakes in the sources. Now, you just get a logged warning. 
+ sources_fixture = self.useFixture(BootSourcesFixture( + [ + { + 'url': self.make_dir(), + 'keyring': factory.make_name('keyring'), + 'selections': [{'release': factory.make_name('release')}], + }, + ])) + self.patch(boot_resources, 'download_all_image_descriptions') + boot_resources.download_all_image_descriptions.return_value = ( + BootImageMapping()) + self.patch_maaslog() + self.patch(boot_resources, 'RepoWriter') + args = self.make_args(sources_file=sources_fixture.filename) + + boot_resources.main(args) + + self.assertThat( + boot_resources.maaslog.warn, + MockAnyCall( + "Finished importing boot images, the region does not have " + "any boot images available.")) + + def test_raises_ioerror_when_no_sources_file_found(self): + self.patch_maaslog() + no_sources = os.path.join( + self.make_dir(), '%s.yaml' % factory.make_name('no-sources')) self.assertRaises( boot_resources.NoConfigFile, - boot_resources.main, args) + boot_resources.main, self.make_args(sources_file=no_sources)) def test_raises_non_ENOENT_IOErrors(self): # main() will raise a NoConfigFile error when it encounters an # ENOENT IOError, but will otherwise just re-raise the original # IOError. - args = MagicMock() - mock_load_from_cache = self.patch(BootConfig, 'load_from_cache') + mock_load = self.patch(BootSources, 'load') other_error = IOError(randint(errno.ENOENT + 1, 1000)) - mock_load_from_cache.side_effect = other_error - # Suppress log output. - self.logger = self.patch(boot_resources, 'logger') - raised_error = self.assertRaises(IOError, boot_resources.main, args) + mock_load.side_effect = other_error + self.patch_maaslog() + raised_error = self.assertRaises( + IOError, + boot_resources.main, self.make_args()) self.assertEqual(other_error, raised_error) + + def test_raises_error_when_no_sources_passed(self): + # main() raises an error when neither a sources file nor a sources + # listing is specified. 
+ self.patch_maaslog() + self.assertRaises( + boot_resources.NoConfigFile, + boot_resources.main, self.make_args(sources="", sources_file="")) + + +class TestMetaContains(MAASTestCase): + """Tests for the `meta_contains` function.""" + + def make_meta_file(self, content=None): + if content is None: + content = factory.make_string() + storage = self.make_dir() + current = os.path.join(storage, 'current') + os.mkdir(current) + return storage, factory.make_file(current, 'maas.meta', content) + + def test_matching_content_is_compared_True(self): + content = factory.make_string() + storage, meta_file = self.make_meta_file(content) + self.assertTrue(boot_resources.meta_contains(storage, content)) + + def test_mismatching_content_is_compared_False(self): + content = factory.make_string() + storage, meta_file = self.make_meta_file() + self.assertFalse(boot_resources.meta_contains(storage, content)) + + def test_meta_contains_updates_file_timestamp(self): + content = factory.make_string() + storage, meta_file = self.make_meta_file(content) + + # Change the file's timestamp to a week ago. + one_week_ago = timedelta(weeks=1).total_seconds() + age_file(meta_file, one_week_ago) + + boot_resources.meta_contains(storage, content) + + # Check the timestamp was updated. 
+ expected_date = datetime.now() + actual_date = datetime.fromtimestamp(int(os.path.getmtime(meta_file))) + self.assertEqual(expected_date.day, actual_date.day) + + +class TestParseSources(MAASTestCase): + """Tests for the `parse_sources` function.""" + + def test_parses_sources(self): + self.patch(boot_resources, 'maaslog') + sources = [ + { + 'keyring': factory.make_name("keyring"), + 'keyring_data': '', + 'url': factory.make_name("something"), + 'selections': [ + { + 'os': factory.make_name("os"), + 'release': factory.make_name("release"), + 'arches': [factory.make_name("arch")], + 'subarches': [factory.make_name("subarch")], + 'labels': [factory.make_name("label")], + }, + ], + }, + ] + parsed_sources = boot_resources.parse_sources(yaml.safe_dump(sources)) + self.assertEqual(sources, parsed_sources) + + +class TestImportImages(MAASTestCase): + """Tests for the `import_images`() function.""" + + def test_writes_source_keyrings(self): + # Stop import_images() from actually doing anything. 
+ self.patch(boot_resources, 'maaslog')
+ self.patch(boot_resources, 'call_and_check')
+ self.patch(boot_resources, 'download_all_boot_resources')
+ self.patch(boot_resources, 'download_all_image_descriptions')
+ self.patch(boot_resources, 'install_boot_loaders')
+ self.patch(boot_resources, 'update_current_symlink')
+ self.patch(boot_resources, 'write_snapshot_metadata')
+ self.patch(boot_resources, 'write_targets_conf')
+ self.patch(boot_resources, 'update_targets_conf')
+
+ fake_write_all_keyrings = self.patch(
+ boot_resources, 'write_all_keyrings')
+ sources = [
+ {
+ 'keyring_data': self.getUniqueString(),
+ 'url': factory.make_name("something"),
+ 'selections': [
+ {
+ 'os': factory.make_name("os"),
+ 'release': factory.make_name("release"),
+ 'arches': [factory.make_name("arch")],
+ 'subarches': [factory.make_name("subarch")],
+ 'labels': [factory.make_name("label")],
+ },
+ ],
+ },
+ ]
+ boot_resources.import_images(sources)
+ self.assertThat(
+ fake_write_all_keyrings, MockCalledWith(mock.ANY, sources)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_cleanup.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_cleanup.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_cleanup.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_cleanup.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,106 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for the `cleanup` module.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import os +from random import randint + +from maastesting.factory import factory +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import MAASTestCase +from provisioningserver.import_images import cleanup + + +class TestCleanup(MAASTestCase): + + def make_snapshot_dir(self, storage): + name = factory.make_name('snapshot') + path = os.path.join(storage, name) + os.mkdir(path) + return path + + def make_cache_file(self, storage, link_count=0): + cache_dir = os.path.join(storage, 'cache') + if not os.path.exists(cache_dir): + os.mkdir(cache_dir) + + cache_file = factory.make_name('cache') + cache_path = os.path.join(cache_dir, cache_file) + open(cache_path, 'wb').close() + + link_dir = os.path.join(storage, 'links') + if not os.path.exists(link_dir): + os.mkdir(link_dir) + + for i in range(link_count): + link_path = os.path.join(link_dir, '%s-%d' % (cache_file, i)) + os.link(cache_path, link_path) + return cache_path + + def test_list_old_snapshots_returns_all_but_current_directory(self): + storage = self.make_dir() + snapshots = [self.make_snapshot_dir(storage) for _ in range(3)] + current_snapshot = self.make_snapshot_dir(storage) + os.symlink( + current_snapshot, os.path.join(storage, 'current')) + self.assertItemsEqual(snapshots, cleanup.list_old_snapshots(storage)) + + def test_cleanup_snapshots_removes_all_old_snapshots(self): + storage = self.make_dir() + snapshots = [self.make_snapshot_dir(storage) for _ in range(3)] + current_snapshot = self.make_snapshot_dir(storage) + os.symlink( + current_snapshot, os.path.join(storage, 'current')) + cleanup.cleanup_snapshots(storage) + remaining_snapshots = [ + snapshot + for snapshot in snapshots + if os.path.exists(snapshot) + ] + self.assertEqual([], remaining_snapshots) + + def 
test_list_unused_cache_files_returns_all_files_nlink_equal_one(self): + storage = self.make_dir() + cache_nlink_1 = [self.make_cache_file(storage) for _ in range(3)] + for _ in range(3): + self.make_cache_file(storage, link_count=randint(1, 3)) + self.assertItemsEqual( + cache_nlink_1, cleanup.list_unused_cache_files(storage)) + + def test_cleanup_cache_removes_all_files_nlink_equal_one(self): + storage = self.make_dir() + for _ in range(3): + self.make_cache_file(storage) + cache_nlink_greater_than_1 = [ + self.make_cache_file(storage, link_count=randint(1, 3)) + for _ in range(3) + ] + cleanup.cleanup_cache(storage) + cache_dir = os.path.join(storage, 'cache') + remaining_cache = [ + os.path.join(cache_dir, filename) + for filename in os.listdir(cache_dir) + if os.path.isfile(os.path.join(cache_dir, filename)) + ] + self.assertItemsEqual( + cache_nlink_greater_than_1, remaining_cache) + + def test_cleanup_snapshots_and_cache_calls(self): + storage = self.make_dir() + mock_snapshots = self.patch_autospec(cleanup, 'cleanup_snapshots') + mock_cache = self.patch_autospec(cleanup, 'cleanup_cache') + cleanup.cleanup_snapshots_and_cache(storage) + self.assertThat(mock_snapshots, MockCalledOnceWith(storage)) + self.assertThat(mock_cache, MockCalledOnceWith(storage)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_download_descriptions.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_download_descriptions.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_download_descriptions.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_download_descriptions.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,336 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for the `download_descriptions` module.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from mock import sentinel +from provisioningserver.import_images import download_descriptions +from provisioningserver.import_images.boot_image_mapping import ( + BootImageMapping, + ) +from provisioningserver.import_images.download_descriptions import ( + clean_up_repo_item, + RepoDumper, + ) +from provisioningserver.import_images.testing.factory import ( + make_image_spec, + set_resource, + ) + + +class TestValuePassesFilterList(MAASTestCase): + """Tests for `value_passes_filter_list`.""" + + def test_nothing_passes_empty_list(self): + self.assertFalse( + download_descriptions.value_passes_filter_list( + [], factory.make_name('value'))) + + def test_unmatched_value_does_not_pass(self): + self.assertFalse( + download_descriptions.value_passes_filter_list( + [factory.make_name('filter')], factory.make_name('value'))) + + def test_matched_value_passes(self): + value = factory.make_name('value') + self.assertTrue( + download_descriptions.value_passes_filter_list([value], value)) + + def test_value_passes_if_matched_anywhere_in_filter(self): + value = factory.make_name('value') + self.assertTrue( + download_descriptions.value_passes_filter_list( + [ + factory.make_name('filter'), + value, + factory.make_name('filter'), + ], + value)) + + def test_any_value_passes_asterisk(self): + self.assertTrue( + download_descriptions.value_passes_filter_list( + ['*'], factory.make_name('value'))) + + +class TestValuePassesFilter(MAASTestCase): + """Tests for `value_passes_filter`.""" + + def test_unmatched_value_does_not_pass(self): + self.assertFalse( + download_descriptions.value_passes_filter( + factory.make_name('filter'), factory.make_name('value'))) + + def test_matching_value_passes(self): + 
value = factory.make_name('value') + self.assertTrue( + download_descriptions.value_passes_filter(value, value)) + + def test_any_value_matches_asterisk(self): + self.assertTrue( + download_descriptions.value_passes_filter( + '*', factory.make_name('value'))) + + +class TestImagePassesFilter(MAASTestCase): + """Tests for `image_passes_filter`.""" + + def make_filter_from_image(self, image_spec=None): + """Create a filter dict that matches the given `ImageSpec`. + + If `image_spec` is not given, creates a random value. + """ + if image_spec is None: + image_spec = make_image_spec() + return { + 'os': image_spec.os, + 'arches': [image_spec.arch], + 'subarches': [image_spec.subarch], + 'release': image_spec.release, + 'labels': [image_spec.label], + } + + def test_any_image_passes_none_filter(self): + os, arch, subarch, release, label = make_image_spec() + self.assertTrue( + download_descriptions.image_passes_filter( + None, os, arch, subarch, release, label)) + + def test_any_image_passes_empty_filter(self): + os, arch, subarch, release, label = make_image_spec() + self.assertTrue( + download_descriptions.image_passes_filter( + [], os, arch, subarch, release, label)) + + def test_image_passes_matching_filter(self): + image = make_image_spec() + self.assertTrue( + download_descriptions.image_passes_filter( + [self.make_filter_from_image(image)], + image.os, image.arch, image.subarch, + image.release, image.label)) + + def test_image_does_not_pass_nonmatching_filter(self): + image = make_image_spec() + self.assertFalse( + download_descriptions.image_passes_filter( + [self.make_filter_from_image()], + image.os, image.arch, image.subarch, + image.release, image.label)) + + def test_image_passes_if_one_filter_matches(self): + image = make_image_spec() + self.assertTrue( + download_descriptions.image_passes_filter( + [ + self.make_filter_from_image(), + self.make_filter_from_image(image), + self.make_filter_from_image(), + ], + image.os, image.arch, image.subarch, + 
image.release, image.label)) + + def test_filter_checks_release(self): + image = make_image_spec() + self.assertFalse( + download_descriptions.image_passes_filter( + [ + self.make_filter_from_image(image._replace( + release=factory.make_name('other-release'))) + ], + image.os, image.arch, image.subarch, + image.release, image.label)) + + def test_filter_checks_arches(self): + image = make_image_spec() + self.assertFalse( + download_descriptions.image_passes_filter( + [ + self.make_filter_from_image(image._replace( + arch=factory.make_name('other-arch'))) + ], + image.os, image.arch, image.subarch, + image.release, image.label)) + + def test_filter_checks_subarches(self): + image = make_image_spec() + self.assertFalse( + download_descriptions.image_passes_filter( + [ + self.make_filter_from_image(image._replace( + subarch=factory.make_name('other-subarch'))) + ], + image.os, image.arch, image.subarch, + image.release, image.label)) + + def test_filter_checks_labels(self): + image = make_image_spec() + self.assertFalse( + download_descriptions.image_passes_filter( + [ + self.make_filter_from_image(image._replace( + label=factory.make_name('other-label'))) + ], + image.os, image.arch, image.subarch, + image.release, image.label)) + + +class TestBootMerge(MAASTestCase): + """Tests for `boot_merge`.""" + + def test_integrates(self): + # End-to-end scenario for boot_merge: start with an empty boot + # resources dict, and receive one resource from Simplestreams. + total_resources = BootImageMapping() + resources_from_repo = set_resource() + download_descriptions.boot_merge(total_resources, resources_from_repo) + # Since we started with an empty dict, the result contains the same + # item that we got from Simplestreams, and nothing else. 
+ self.assertEqual(resources_from_repo.mapping, total_resources.mapping) + + def test_obeys_filters(self): + filters = [ + { + 'os': factory.make_name('os'), + 'arches': [factory.make_name('other-arch')], + 'subarches': [factory.make_name('other-subarch')], + 'release': factory.make_name('other-release'), + 'label': [factory.make_name('other-label')], + }, + ] + total_resources = BootImageMapping() + resources_from_repo = set_resource() + download_descriptions.boot_merge( + total_resources, resources_from_repo, filters=filters) + self.assertEqual({}, total_resources.mapping) + + def test_does_not_overwrite_existing_entry(self): + image = make_image_spec() + total_resources = set_resource( + resource="Original resource", image_spec=image) + original_resources = total_resources.mapping.copy() + resources_from_repo = set_resource( + resource="New resource", image_spec=image) + download_descriptions.boot_merge(total_resources, resources_from_repo) + self.assertEqual(original_resources, total_resources.mapping) + + +class TestRepoDumper(MAASTestCase): + """Tests for `RepoDumper`.""" + + def make_item(self, os=None, release=None, arch=None, + subarch=None, subarches=None, label=None): + if os is None: + os = factory.make_name('os') + if release is None: + release = factory.make_name('release') + if arch is None: + arch = factory.make_name('arch') + if subarch is None: + subarch = factory.make_name('subarch') + if subarches is None: + subarches = [factory.make_name('subarch') for _ in range(3)] + if subarch not in subarches: + subarches.append(subarch) + if label is None: + label = factory.make_name('label') + item = { + 'content_id': factory.make_name('content_id'), + 'product_name': factory.make_name('product_name'), + 'version_name': factory.make_name('version_name'), + 'path': factory.make_name('path'), + 'os': os, + 'release': release, + 'arch': arch, + 'subarch': subarch, + 'subarches': ','.join(subarches), + 'label': label, + } + return item, 
clean_up_repo_item(item) + + def test_insert_item_adds_item_per_subarch(self): + boot_images_dict = BootImageMapping() + dumper = RepoDumper(boot_images_dict) + subarches = [factory.make_name('subarch') for _ in range(3)] + item, _ = self.make_item( + subarch=subarches.pop(), subarches=subarches) + self.patch( + download_descriptions, 'products_exdata').return_value = item + dumper.insert_item( + sentinel.data, sentinel.src, sentinel.target, + sentinel.pedigree, sentinel.contentsource) + image_specs = [ + make_image_spec( + os=item['os'], release=item['release'], + arch=item['arch'], subarch=subarch, + label=item['label']) + for subarch in subarches + ] + self.assertItemsEqual(image_specs, boot_images_dict.mapping.keys()) + + def test_insert_item_sets_compat_item_specific_to_subarch(self): + boot_images_dict = BootImageMapping() + dumper = RepoDumper(boot_images_dict) + subarches = [factory.make_name('subarch') for _ in range(5)] + compat_subarch = subarches.pop() + item, _ = self.make_item(subarch=subarches.pop(), subarches=subarches) + second_item, compat_item = self.make_item( + os=item['os'], release=item['release'], arch=item['arch'], + subarch=compat_subarch, subarches=[compat_subarch], + label=item['label']) + self.patch( + download_descriptions, + 'products_exdata').side_effect = [item, second_item] + for _ in range(2): + dumper.insert_item( + sentinel.data, sentinel.src, sentinel.target, + sentinel.pedigree, sentinel.contentsource) + image_spec = make_image_spec( + os=item['os'], release=item['release'], + arch=item['arch'], subarch=compat_subarch, + label=item['label']) + self.assertEqual(compat_item, boot_images_dict.mapping[image_spec]) + + def test_insert_item_sets_generic_to_release_item_for_hwe(self): + boot_images_dict = BootImageMapping() + dumper = RepoDumper(boot_images_dict) + os = 'ubuntu' + release = 'precise' + arch = 'amd64' + label = 'release' + hwep_subarch = 'hwe-p' + hwep_subarches = ['generic', 'hwe-p'] + hwes_subarch = 'hwe-s' + 
hwes_subarches = ['generic', 'hwe-p', 'hwe-s'] + hwep_item, compat_item = self.make_item( + os=os, release=release, + arch=arch, subarch=hwep_subarch, + subarches=hwep_subarches, label=label) + hwes_item, _ = self.make_item( + os=os, release=release, + arch=arch, subarch=hwes_subarch, + subarches=hwes_subarches, label=label) + self.patch( + download_descriptions, + 'products_exdata').side_effect = [hwep_item, hwes_item] + for _ in range(2): + dumper.insert_item( + sentinel.data, sentinel.src, sentinel.target, + sentinel.pedigree, sentinel.contentsource) + image_spec = make_image_spec( + os=os, release=release, arch=arch, subarch='generic', + label=label) + self.assertEqual(compat_item, boot_images_dict.mapping[image_spec]) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_download_resources.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_download_resources.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_download_resources.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_download_resources.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,103 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for `provisioningserver.import_images.download_resources`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from datetime import datetime +import os + +from maastesting.matchers import MockCalledWith +from maastesting.testcase import MAASTestCase +import mock +from provisioningserver.import_images import download_resources +from provisioningserver.import_images.product_mapping import ProductMapping +from simplestreams.objectstores import FileStore + + +class MockDateTime(mock.MagicMock): + """A class for faking datetimes.""" + + _utcnow = datetime.utcnow() + + @classmethod + def utcnow(cls): + return cls._utcnow + + +class TestDownloadAllBootResources(MAASTestCase): + """Tests for `download_all_boot_resources`().""" + + def test_returns_snapshot_path(self): + self.patch(download_resources, 'datetime', MockDateTime) + storage_path = self.make_dir() + expected_path = os.path.join( + storage_path, + 'snapshot-%s' % MockDateTime._utcnow.strftime('%Y%m%d-%H%M%S')) + self.assertEqual( + expected_path, + download_resources.download_all_boot_resources( + sources=[], storage_path=storage_path, + product_mapping=None)) + + def test_calls_download_boot_resources(self): + self.patch(download_resources, 'datetime', MockDateTime) + storage_path = self.make_dir() + snapshot_path = download_resources.compose_snapshot_path( + storage_path) + cache_path = os.path.join(storage_path, 'cache') + file_store = FileStore(cache_path) + source = { + 'url': 'http://example.com', + 'keyring': self.make_file("keyring"), + } + product_mapping = ProductMapping() + fake = self.patch(download_resources, 'download_boot_resources') + download_resources.download_all_boot_resources( + sources=[source], storage_path=storage_path, + product_mapping=product_mapping, store=file_store) + self.assertThat( + fake, + MockCalledWith( + source['url'], file_store, snapshot_path, product_mapping, + 
keyring_file=source['keyring'])) + + +class TestDownloadBootResources(MAASTestCase): + """Tests for `download_boot_resources()`.""" + + def test_syncs_repo(self): + fake_sync = self.patch(download_resources.RepoWriter, 'sync') + storage_path = self.make_dir() + snapshot_path = self.make_dir() + cache_path = os.path.join(storage_path, 'cache') + file_store = FileStore(cache_path) + source_url = "http://maas.ubuntu.com/images/ephemeral-v2/releases/" + + download_resources.download_boot_resources( + source_url, file_store, snapshot_path, None, None) + self.assertEqual(1, len(fake_sync.mock_calls)) + + +class TestComposeSnapshotPath(MAASTestCase): + """Tests for `compose_snapshot_path`().""" + + def test_returns_path_under_storage_path(self): + self.patch(download_resources, 'datetime', MockDateTime) + storage_path = self.make_dir() + expected_path = os.path.join( + storage_path, + 'snapshot-%s' % MockDateTime._utcnow.strftime('%Y%m%d-%H%M%S')) + self.assertEqual( + expected_path, + download_resources.compose_snapshot_path(storage_path)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_helpers.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_helpers.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_helpers.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_helpers.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,73 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for the `helpers` module.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maastesting.factory import factory +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import MAASTestCase +import mock +from provisioningserver.import_images import helpers +from simplestreams.util import SignatureMissingException + + +class TestGetSigningPolicy(MAASTestCase): + """Tests for `get_signing_policy`.""" + + def test_picks_nonchecking_policy_for_json_index(self): + path = 'streams/v1/index.json' + policy = helpers.get_signing_policy(path) + content = factory.make_string() + self.assertEqual( + content, + policy(content, path, factory.make_name('keyring'))) + + def test_picks_checking_policy_for_sjson_index(self): + path = 'streams/v1/index.sjson' + content = factory.make_string() + policy = helpers.get_signing_policy(path) + self.assertRaises( + SignatureMissingException, + policy, content, path, factory.make_name('keyring')) + + def test_picks_checking_policy_for_json_gpg_index(self): + path = 'streams/v1/index.json.gpg' + content = factory.make_string() + policy = helpers.get_signing_policy(path) + self.assertRaises( + SignatureMissingException, + policy, content, path, factory.make_name('keyring')) + + def test_injects_default_keyring_if_passed(self): + path = 'streams/v1/index.json.gpg' + content = factory.make_string() + keyring = factory.make_name('keyring') + self.patch(helpers, 'policy_read_signed') + policy = helpers.get_signing_policy(path, keyring) + policy(content, path) + self.assertThat( + helpers.policy_read_signed, + MockCalledOnceWith(mock.ANY, mock.ANY, keyring=keyring)) + + +class TestGetOSFromProduct(MAASTestCase): + """Tests for `get_os_from_product`.""" + + def test_returns_os_from_product(self): + os = factory.make_name('os') + product = {'os': os} + self.assertEqual(os, helpers.get_os_from_product(product)) + + def 
test_returns_ubuntu_if_missing(self): + self.assertEqual('ubuntu', helpers.get_os_from_product({})) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_keyrings.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_keyrings.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_keyrings.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_keyrings.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,115 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). +"""Tests for the import_images keyring management functions.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import os + +from maastesting.factory import factory +from maastesting.matchers import ( + MockCalledWith, + MockCallsMatch, + ) +from maastesting.testcase import MAASTestCase +import mock +from provisioningserver.import_images import keyrings +from testtools.matchers import FileContains + + +class TestWriteKeyring(MAASTestCase): + """Tests for `write_keyring().`""" + + def test_writes_keyring_to_file(self): + keyring_data = b"A keyring! My kingdom for a keyring!" 
+ keyring_path = os.path.join(self.make_dir(), "a-keyring-file") + keyrings.write_keyring(keyring_path, keyring_data) + self.assertTrue(os.path.exists(keyring_path)) + self.assertThat(keyring_path, FileContains(keyring_data)) + + +class TestCalculateKeyringName(MAASTestCase): + """Tests for `calculate_keyring_name()`.""" + + def test_creates_name_from_url(self): + parts = [self.getUniqueString() for _ in range(1, 5)] + source_url = "http://example.com/%s/" % "/".join(parts) + expected_keyring_name = "example.com-%s.gpg" % "-".join(parts) + self.assertEqual( + expected_keyring_name, + keyrings.calculate_keyring_name(source_url)) + + +class TestWriteAllKeyrings(MAASTestCase): + """Test for the `write_all_keyrings()` function.""" + + def test_writes_keyring_data(self): + fake_write_keyring = self.patch(keyrings, 'write_keyring') + + sources = [{ + 'url': "http://%s" % self.getUniqueString(), + 'keyring_data': factory.make_bytes(), + } for _ in range(5)] + + keyring_path = self.make_dir() + + keyrings.write_all_keyrings(keyring_path, sources) + + expected_calls = ( + mock.call( + os.path.join( + keyring_path, + keyrings.calculate_keyring_name(source['url'])), + source['keyring_data']) + for source in sources) + self.assertThat(fake_write_keyring, MockCallsMatch(*expected_calls)) + + def test_returns_sources(self): + self.patch(keyrings, 'write_keyring') + sources = [{ + 'url': "http://%s" % self.getUniqueString(), + 'keyring_data': factory.make_bytes(), + } for _ in range(5)] + + keyring_path = self.make_dir() + + expected_values = [ + os.path.join( + keyring_path, + keyrings.calculate_keyring_name(source['url'])) + for source in sources] + + returned_sources = keyrings.write_all_keyrings(keyring_path, sources) + actual_values = [ + source.get('keyring') for source in returned_sources] + self.assertEqual(expected_values, actual_values) + + def test_ignores_existing_keyrings(self): + self.patch(keyrings, 'write_keyring') + fake_maaslog = self.patch(keyrings, 'maaslog') 
+ source = { + 'url': self.getUniqueString(), + 'keyring': self.getUniqueString(), + 'keyring_data': self.getUniqueString(), + } + + keyring_path = self.make_dir() + + [returned_source] = keyrings.write_all_keyrings(keyring_path, [source]) + expected_keyring = os.path.join( + keyring_path, keyrings.calculate_keyring_name(source['url'])) + self.assertEqual(expected_keyring, returned_source.get('keyring')) + self.assertThat( + fake_maaslog.warning, + MockCalledWith( + "Both a keyring file and keyring data were specified; " + "ignoring the keyring file.")) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_product_mapping.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_product_mapping.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_product_mapping.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_product_mapping.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,177 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for the `ProductMapping` class.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from provisioningserver.import_images.boot_image_mapping import ( + BootImageMapping, + ) +from provisioningserver.import_images.product_mapping import ( + map_products, + ProductMapping, + ) +from provisioningserver.import_images.testing.factory import ( + make_boot_resource, + make_image_spec, + set_resource, + ) + + +class TestProductMapping(MAASTestCase): + """Tests for `ProductMapping`.""" + + def test_initially_empty(self): + self.assertEqual({}, ProductMapping().mapping) + + def test_make_key_extracts_identifying_items(self): + resource = make_boot_resource() + content_id = resource['content_id'] + product_name = resource['product_name'] + version_name = resource['version_name'] + self.assertEqual( + (content_id, product_name, version_name), + ProductMapping.make_key(resource)) + + def test_make_key_ignores_other_items(self): + resource = make_boot_resource() + resource['other_item'] = factory.make_name('other') + self.assertEqual( + ( + resource['content_id'], + resource['product_name'], + resource['version_name'], + ), + ProductMapping.make_key(resource)) + + def test_make_key_fails_if_key_missing(self): + resource = make_boot_resource() + del resource['version_name'] + self.assertRaises( + KeyError, + ProductMapping.make_key, resource) + + def test_add_creates_subarches_list_if_needed(self): + product_dict = ProductMapping() + resource = make_boot_resource() + subarch = factory.make_name('subarch') + product_dict.add(resource, subarch) + self.assertEqual( + {product_dict.make_key(resource): [subarch]}, + product_dict.mapping) + + def test_add_appends_to_existing_list(self): + product_dict = ProductMapping() + resource = make_boot_resource() + subarches = 
[factory.make_name('subarch') for _ in range(2)] + for subarch in subarches: + product_dict.add(resource, subarch) + self.assertEqual( + {product_dict.make_key(resource): subarches}, + product_dict.mapping) + + def test_contains_returns_true_for_stored_item(self): + product_dict = ProductMapping() + resource = make_boot_resource() + subarch = factory.make_name('subarch') + product_dict.add(resource, subarch) + self.assertTrue(product_dict.contains(resource)) + + def test_contains_returns_false_for_unstored_item(self): + self.assertFalse( + ProductMapping().contains(make_boot_resource())) + + def test_contains_ignores_similar_items(self): + product_dict = ProductMapping() + resource = make_boot_resource() + subarch = factory.make_name('subarch') + product_dict.add(resource.copy(), subarch) + resource['product_name'] = factory.make_name('other') + self.assertFalse(product_dict.contains(resource)) + + def test_contains_ignores_extraneous_keys(self): + product_dict = ProductMapping() + resource = make_boot_resource() + subarch = factory.make_name('subarch') + product_dict.add(resource.copy(), subarch) + resource['other_item'] = factory.make_name('other') + self.assertTrue(product_dict.contains(resource)) + + def test_get_returns_stored_item(self): + product_dict = ProductMapping() + resource = make_boot_resource() + subarch = factory.make_name('subarch') + product_dict.add(resource, subarch) + self.assertEqual([subarch], product_dict.get(resource)) + + def test_get_fails_for_unstored_item(self): + product_dict = ProductMapping() + resource = make_boot_resource() + subarch = factory.make_name('subarch') + product_dict.add(resource.copy(), subarch) + resource['content_id'] = factory.make_name('other') + self.assertRaises(KeyError, product_dict.get, resource) + + def test_get_ignores_extraneous_keys(self): + product_dict = ProductMapping() + resource = make_boot_resource() + subarch = factory.make_name('subarch') + product_dict.add(resource, subarch) + 
resource['other_item'] = factory.make_name('other') + self.assertEqual([subarch], product_dict.get(resource)) + + +class TestMapProducts(MAASTestCase): + """Tests for `map_products`.""" + + def test_maps_empty_dict_to_empty_dict(self): + empty_boot_image_dict = BootImageMapping() + self.assertEqual({}, map_products(empty_boot_image_dict).mapping) + + def test_maps_boot_resource_by_content_id_product_name_and_version(self): + image = make_image_spec() + resource = make_boot_resource() + boot_dict = set_resource(resource=resource.copy(), image_spec=image) + self.assertEqual( + { + ( + resource['content_id'], + resource['product_name'], + resource['version_name'], + ): [image.subarch], + }, + map_products(boot_dict).mapping) + + def test_concatenates_similar_resources(self): + image1 = make_image_spec() + image2 = make_image_spec() + resource = make_boot_resource() + boot_dict = BootImageMapping() + # Create two images in boot_dict, both containing the same resource. + for image in [image1, image2]: + set_resource( + boot_dict=boot_dict, resource=resource.copy(), + image_spec=image) + + products_mapping = map_products(boot_dict) + key = ( + resource['content_id'], + resource['product_name'], + resource['version_name'], + ) + self.assertEqual([key], products_mapping.mapping.keys()) + self.assertItemsEqual( + [image1.subarch, image2.subarch], + products_mapping.get(resource)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_uec2roottar.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_uec2roottar.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/tests/test_uec2roottar.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/tests/test_uec2roottar.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,343 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for the `uec2roottar` script and its supporting module..""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import os +import os.path +from subprocess import CalledProcessError + +from maastesting.factory import factory +from maastesting.matchers import ( + MockAnyCall, + MockCalledOnceWith, + MockNotCalled, + ) +from maastesting.testcase import MAASTestCase +import mock +from provisioningserver.import_images import uec2roottar +from testtools.matchers import HasLength +from testtools.testcase import ExpectedException + + +def make_image_name(suffix='.img'): + """Create an image file name (but not the actual file).""" + return factory.make_name('root') + suffix + + +def make_image(testcase, contents=None, suffix='.img'): + """Create an image file.""" + name = make_image_name(suffix) + return testcase.make_file(name=name, contents=contents) + + +def make_tarball_name(prefix='tarball'): + """Create an arbitrary name for a tarball.""" + return factory.make_name(prefix) + '.tar.gz' + + +def make_roottar_location(testcase): + """Create a name for an output root tarball, in an empty directory.""" + name = make_tarball_name('root') + return os.path.join(testcase.make_dir(), name) + + +def patch_is_filesystem_file(testcase, answer): + """Patch `is_filesystem_file` to return the given answer.""" + testcase.patch(uec2roottar, 'is_filesystem_file').return_value = answer + + +class TestMakeArgParser(MAASTestCase): + """Tests for `make_argparser`.""" + + def test__defines_expected_options(self): + image = make_image(self) + output = make_roottar_location(self) + user = factory.make_name('user') + + parser = uec2roottar.make_argparser(factory.make_string()) + args = parser.parse_args([image, output, '--user', user]) + + self.assertEqual( + ( + image, + output, + user, + ), + ( + args.image, + args.output, + args.user, + )) + + def test__user_defaults_to_None(self): + 
parser = uec2roottar.make_argparser(factory.make_string()) + args = parser.parse_args( + [make_image(self), make_roottar_location(self)]) + self.assertIsNone(args.user) + + +class TestIsFilesystemFile(MAASTestCase): + """Tests for `is_filesystem_file`.""" + + def test__returns_True_if_file_looks_like_filesystem(self): + image = make_image(self, suffix='.img') + self.patch(uec2roottar, 'check_output').return_value = ( + ("%s: filesystem data" % image).encode('utf-8')) + self.assertTrue(uec2roottar.is_filesystem_file(image)) + + def test__returns_False_for_tarball(self): + image = make_image(self, suffix='.tar.gz') + self.patch(uec2roottar, 'check_output').return_value = ( + ("%s: gzip compressed data, was ..." % image).encode('utf-8')) + self.assertFalse(uec2roottar.is_filesystem_file(image)) + + def test__calls_file_with_C_language_setting(self): + env_during_invocation = {} + + def fake_check_output(*args, **kwargs): + env_during_invocation.update(os.environ) + return b'' + + self.patch(uec2roottar, 'check_output', fake_check_output) + + uec2roottar.is_filesystem_file(make_image(self)) + + self.assertEqual('C', env_during_invocation.get('LANG')) + + +class TestExtractImageFromTarball(MAASTestCase): + """Tests for `extract_image_from_tarball`.""" + + def test__extracts_image(self): + tarball = make_tarball_name() + self.patch(uec2roottar, 'check_call') + # Cheat: patch away extraction of the tarball, but pass a temporary + # directory with an image already in it. The function will think it + # just extracted the image from the tarball. 
+ image = make_image(self) + working_dir = os.path.dirname(image) + + result = uec2roottar.extract_image_from_tarball(tarball, working_dir) + + self.assertThat( + uec2roottar.check_call, + MockCalledOnceWith([ + 'tar', + '-C', working_dir, + '--wildcards', '*.img', + '-Sxvzf', + tarball, + ])) + self.assertEqual(image, result) + + def test__ignores_other_files(self): + tarball = make_tarball_name() + self.patch(uec2roottar, 'check_call') + # Make the function think that it found two files in the tarball: an + # image and some other file. + image = make_image(self) + working_dir = os.path.dirname(image) + # This other file doesn't upset things, because it doesn't look like + # an image file. + factory.make_file(working_dir) + + self.assertEqual( + image, + uec2roottar.extract_image_from_tarball(tarball, working_dir)) + + def test__fails_if_no_image_found(self): + tarball = make_tarball_name() + self.patch(uec2roottar, 'check_call') + empty_dir = self.make_dir() + error = self.assertRaises( + uec2roottar.ImageFileError, + uec2roottar.extract_image_from_tarball, tarball, empty_dir) + self.assertEqual( + "Tarball %s does not contain any *.img." % tarball, + unicode(error)) + + def test__fails_if_multiple_images_found(self): + tarball = make_tarball_name() + self.patch(uec2roottar, 'check_call') + working_dir = self.make_dir() + files = sorted( + factory.make_file(working_dir, name=make_image_name()) + for _ in range(2)) + error = self.assertRaises( + uec2roottar.ImageFileError, + uec2roottar.extract_image_from_tarball, tarball, working_dir) + self.assertEqual( + "Tarball %s contains multiple image files: %s." 
+ % (tarball, ', '.join(files)), + unicode(error)) + + +class TestGetImageFile(MAASTestCase): + """Tests for `get_image_file`.""" + + def test__returns_actual_image_file_unchanged(self): + patch_is_filesystem_file(self, True) + image = make_image(self) + self.assertEqual( + image, + uec2roottar.get_image_file(image, factory.make_name('dir'))) + + def test__extracts_tarball_into_temp_dir(self): + patch_is_filesystem_file(self, False) + tarball = make_tarball_name() + temp_dir = self.make_dir() + image = make_image_name() + patch = self.patch(uec2roottar, 'extract_image_from_tarball') + patch.return_value = image + result = uec2roottar.get_image_file(tarball, temp_dir) + self.assertEqual(image, result) + self.assertThat(patch, MockCalledOnceWith(tarball, temp_dir)) + + def test__rejects_other_files(self): + patch_is_filesystem_file(self, False) + filename = factory.make_name('weird-file') + error = self.assertRaises( + uec2roottar.ImageFileError, + uec2roottar.get_image_file, filename, factory.make_name('dir')) + self.assertEqual( + "Expected '%s' to be either a filesystem file, or a " + "gzipped tarball containing one." 
% filename, + unicode(error)) + + +class TestUnmount(MAASTestCase): + """Tests for `unmount`.""" + + def test__calls_umount(self): + self.patch(uec2roottar, 'check_call') + mountpoint = factory.make_name('mount') + uec2roottar.unmount(mountpoint) + self.assertThat( + uec2roottar.check_call, + MockCalledOnceWith(['umount', mountpoint])) + + def test__propagates_failure(self): + failure = CalledProcessError(9, factory.make_name('delibfail')) + self.patch(uec2roottar, 'check_call').side_effect = failure + self.patch(uec2roottar, 'maaslog') + mountpoint = factory.make_name('mount') + self.assertRaises(CalledProcessError, uec2roottar.unmount, mountpoint) + self.assertThat( + uec2roottar.maaslog.error, + MockCalledOnceWith( + "Could not unmount %s: %s", mountpoint, failure)) + + +class TestLoopMount(MAASTestCase): + """Tests for `loop_mount`.""" + + def test__mounts_and_unmounts_image(self): + image = make_image_name() + self.patch(uec2roottar, 'check_call') + mountpoint = factory.make_name('mount') + + calls_before = len(uec2roottar.check_call.mock_calls) + with uec2roottar.loop_mount(image, mountpoint): + calls_during = len(uec2roottar.check_call.mock_calls) + calls_after = len(uec2roottar.check_call.mock_calls) + + self.assertEqual( + (0, 1, 2), + (calls_before, calls_during, calls_after)) + self.assertThat( + uec2roottar.check_call, + MockAnyCall(['mount', '-o', 'ro', image, mountpoint])) + self.assertThat( + uec2roottar.check_call, + MockAnyCall(['umount', mountpoint])) + + def test__cleans_up_after_failure(self): + class DeliberateException(Exception): + pass + + self.patch(uec2roottar, 'check_call') + image = make_image_name() + mountpoint = factory.make_name('mount') + with ExpectedException(DeliberateException): + with uec2roottar.loop_mount(image, mountpoint): + raise DeliberateException() + + self.assertThat( + uec2roottar.check_call, MockAnyCall(['umount', mountpoint])) + + +class TestExtractImage(MAASTestCase): + """Tests for `extract_image`.""" + + def 
extract_command_line(self, call): + """Extract the command line from a `mock.call` for `check_call`.""" + _, args, _ = call + [command] = args + return command + + def test__extracts_image(self): + image = make_image_name() + output = make_tarball_name() + self.patch(uec2roottar, 'check_call') + uec2roottar.extract_image(image, output) + self.assertThat(uec2roottar.check_call.mock_calls, HasLength(3)) + [mount_call, tar_call, umount_call] = uec2roottar.check_call.mock_calls + self.assertEqual('mount', self.extract_command_line(mount_call)[0]) + tar_command = self.extract_command_line(tar_call) + self.assertEqual(['tar', '-C'], tar_command[:2]) + self.assertEqual('umount', self.extract_command_line(umount_call)[0]) + + +class TestSetOwnership(MAASTestCase): + """Tests for `set_ownership`.""" + + def test__does_nothing_if_no_user_specified(self): + self.patch(uec2roottar, 'check_call') + uec2roottar.set_ownership(make_tarball_name(), user=None) + self.assertThat(uec2roottar.check_call, MockNotCalled()) + + def test__calls_chown_if_user_specified(self): + self.patch(uec2roottar, 'check_call') + user = factory.make_name('user') + tarball = make_tarball_name() + uec2roottar.set_ownership(tarball, user=user) + self.assertThat( + uec2roottar.check_call, + MockCalledOnceWith(['/bin/chown', user, tarball])) + + +class TestUEC2RootTar(MAASTestCase): + """Integration tests for `uec2roottar`.""" + + def make_args(self, **kwargs): + """Fake an `argparser` arguments object.""" + args = mock.Mock() + for key, value in kwargs.items(): + setattr(args, key, value) + return args + + def test__integrates(self): + image_name = factory.make_name('root-image') + '.img' + image = self.make_file(name=image_name) + output_name = factory.make_name('root-tar') + '.tar.gz' + output = os.path.join(self.make_dir(), output_name) + args = self.make_args(image=image, output=output) + self.patch(uec2roottar, 'check_call') + patch_is_filesystem_file(self, True) + + uec2roottar.main(args) + + 
self.assertThat( + uec2roottar.is_filesystem_file, MockCalledOnceWith(image)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/import_images/uec2roottar.py maas-1.7.6+bzr3376/src/provisioningserver/import_images/uec2roottar.py --- maas-1.5.4+bzr2294/src/provisioningserver/import_images/uec2roottar.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/import_images/uec2roottar.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,183 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Code for the `uec2roottar` script.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'main', + 'make_argparser', + ] + +import argparse +from contextlib import contextmanager +from glob import glob +import os.path +from subprocess import ( + check_call, + check_output, + ) + +from provisioningserver.logger import get_maas_logger +from provisioningserver.utils.env import environment_variables +from provisioningserver.utils.fs import tempdir + + +maaslog = get_maas_logger("uec2roottar") + + +def make_argparser(description): + """Create an `ArgumentParser` for this script.""" + parser = argparse.ArgumentParser(description=description) + parser.add_argument( + 'image', metavar='IMAGE-FILE', help="Input file: UEC root image.") + parser.add_argument( + 'output', metavar='TARBALL', help="Output file: root tarball.") + parser.add_argument( + '--user', '-u', help="Set output file ownership to USER.") + return parser + + +def is_filesystem_file(path): + """Does the file at `path` look like a filesystem-in-a-file?""" + # Identify filesystems using the "file" utility. We'll be parsing the + # output, so suppress any translation. 
+ with environment_variables({'LANG': 'C'}): + output = check_output(['file', path]) + return b"filesystem data" in output + + +class ImageFileError(Exception): + """Problem with the given image file.""" + + +def extract_image_from_tarball(tarball, working_dir): + """Extract image file from `tarball` into `working_dir`, return its path. + + This may extract multiple files into `working_dir`; it looks for files with + names like `*.img`. The function only succeeds, however, if there is + exactly one of those, in the tarball's root directory. + """ + glob_pattern = '*.img' + maaslog.debug( + "Extracting %s from %s into %s.", glob_pattern, tarball, working_dir) + check_call([ + 'tar', + '-C', working_dir, + '--wildcards', glob_pattern, + '-Sxvzf', tarball, + ]) + # Look for .img files. Sort just so that if there is more than one image + # file, we'll produce a consistent error message. + candidates = sorted(glob(os.path.join(working_dir, glob_pattern))) + if len(candidates) == 0: + raise ImageFileError( + "Tarball %s does not contain any %s." % (tarball, glob_pattern)) + if len(candidates) > 1: + raise ImageFileError( + "Tarball %s contains multiple image files: %s." + % (tarball, ', '.join(candidates))) + [image] = candidates + return image + + +def get_image_file(path, temp_dir): + """Return image file at, or contained in tarball at, `path`. + + :param path: Path to the image file. Must point to either a file + containing a filesystem, or a tarball containing one, of the same + base name. + :param temp_dir: A temporary working directory. If the image needs to be + extracted from a tarball, the tarball will be extracted here. + """ + if is_filesystem_file(path): + # Easy. This is the actual image file. + return path + elif path.endswith('.tar.gz'): + # Tarball. Extract image file. + return extract_image_from_tarball(path, temp_dir) + else: + raise ImageFileError( + "Expected '%s' to be either a filesystem file, or " + "a gzipped tarball containing one." 
% path) + + +def unmount(mountpoint): + """Unmount filesystem at given mount point. + + If this fails, it logs the error as well as raising it. This means that + error code paths can suppress the exception without depriving the user of + the information. + """ + try: + check_call(['umount', mountpoint]) + except BaseException as e: + maaslog.error("Could not unmount %s: %s", mountpoint, e) + raise + + +@contextmanager +def loop_mount(image, mountpoint): + """Context manager: temporarily loop-mount `image` at `mountpoint`.""" + check_call(['mount', '-o', 'ro', image, mountpoint]) + try: + yield + except: + try: + unmount(mountpoint) + except Exception: + # This is probably a secondary error resulting from the original + # problem. Stick with the original exception. + pass + raise + else: + # Unmount after successful run. If this fails, let the exception + # propagate. + unmount(mountpoint) + + +def extract_image(image, output): + """Loop-mount `image`, and tar its contents into `output`.""" + with tempdir() as mountpoint: + with loop_mount(image, mountpoint): + check_call([ + 'tar', + # Work from mountpoint as the current directory. + '-C', mountpoint, + # Options: + # -c: Create tarfile. + # -p: Preserve permissions. + # -S: Handle sparse files efficiently (images have those). + # -z: Compress using gzip. + # -f: Work on given tar file. + '-cpSzf', output, + '--numeric-owner', + # Tar up the "current directory": the mountpoint. 
+ '.', + ]) + + +def set_ownership(path, user=None): + """Set file ownership to `user` if specified.""" + if user is not None: + maaslog.debug("Setting file owner to %s.", user) + check_call(['/bin/chown', user, path]) + + +def main(args): + """Do the work: loop-mount image, write contents to output file.""" + output = args.output + maaslog.debug("Converting %s to %s.", args.image, output) + with tempdir() as working_dir: + image = get_image_file(args.image, working_dir) + extract_image(image, output) + set_ownership(output, args.user) + maaslog.debug("Finished. Wrote to %s.", output) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/initialize_cache.py maas-1.7.6+bzr3376/src/provisioningserver/initialize_cache.py --- maas-1.5.4+bzr2294/src/provisioningserver/initialize_cache.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/initialize_cache.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Module with side effect: import to initialize the inter-worker cache. - -This is here merely so as to avoid accidental initialization of the cache. -Import this module and the cache will be initialized. -""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from provisioningserver.cache import initialize - - -initialize() diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/__init__.py maas-1.7.6+bzr3376/src/provisioningserver/__init__.py --- maas-1.5.4+bzr2294/src/provisioningserver/__init__.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/__init__.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,34 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""The MAAS Provisioning Server, now referred to as Cluster.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + + +from twisted.application.service import MultiService +from twisted.internet.protocol import Factory + +# The cluster's services. This is initialised by +# ProvisioningServiceMaker. +services = MultiService() + +# Make t.i.protocol.Factory quiet. Its jabbering is mind-numbingly +# useless. +Factory.noisy = False + + +try: + import maasfascist + maasfascist # Silence lint. +except ImportError: + pass diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/kernel_opts.py maas-1.7.6+bzr3376/src/provisioningserver/kernel_opts.py --- maas-1.5.4+bzr2294/src/provisioningserver/kernel_opts.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/kernel_opts.py 2015-07-10 01:27:14.000000000 +0000 @@ -21,7 +21,7 @@ from collections import namedtuple import os -from provisioningserver.driver import ArchitectureRegistry +from provisioningserver.drivers import ArchitectureRegistry class EphemeralImagesDirectoryNotFound(Exception): @@ -30,9 +30,10 @@ KernelParametersBase = namedtuple( "KernelParametersBase", ( + "osystem", # Operating system, e.g. "ubuntu" "arch", # Machine architecture, e.g. "i386" "subarch", # Machine subarchitecture, e.g. "generic" - "release", # Ubuntu release, e.g. "precise" + "release", # OS release, e.g. "precise" "label", # Image label, e.g. "release" "purpose", # Boot purpose, e.g. "commissioning" "hostname", # Machine hostname, e.g. 
"coleman" @@ -90,9 +91,15 @@ ISCSI_TARGET_NAME_PREFIX = "iqn.2004-05.com.ubuntu:maas" -def get_ephemeral_name(arch, subarch, release, label): +def get_ephemeral_name(osystem, arch, subarch, release, label): """Return the name of the most recent ephemeral image.""" - return "ephemeral-%s-%s-%s-%s" % (arch, subarch, release, label) + return "ephemeral-%s-%s-%s-%s-%s" % ( + osystem, + arch, + subarch, + release, + label + ) def compose_hostname_opts(params): @@ -119,7 +126,8 @@ # These are kernel parameters read by the ephemeral environment. tname = prefix_target_name( get_ephemeral_name( - params.arch, params.subarch, params.release, params.label)) + params.osystem, params.arch, params.subarch, + params.release, params.label)) kernel_params = [ # Read by the open-iscsi initramfs code. "iscsi_target_name=%s" % tname, diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/logger/__init__.py maas-1.7.6+bzr3376/src/provisioningserver/logger/__init__.py --- maas-1.5.4+bzr2294/src/provisioningserver/logger/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/logger/__init__.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,19 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). +"""__init__ for the provisioningserver.logger package.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "get_maas_logger" + ] + + +from provisioningserver.logger.log import get_maas_logger diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/logger/log.py maas-1.7.6+bzr3376/src/provisioningserver/logger/log.py --- maas-1.5.4+bzr2294/src/provisioningserver/logger/log.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/logger/log.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,71 @@ +# Copyright 2014 Canonical Ltd. 
This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Logging for MAAS, redirects to syslog.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "get_maas_logger", + ] + +import logging +from logging.handlers import SysLogHandler + + +class MAASLogger(logging.getLoggerClass()): + """A Logger class that doesn't allow you to call exception().""" + + def exception(self, *args, **kwargs): + raise NotImplementedError( + "Don't log exceptions to maaslog; use the default " + "Django logger instead") + + +def get_maas_logger(syslog_tag=None): + """Return a MAAS logger that will log to syslog. + + :param syslog_tag: A string that will be used to prefix the message + in syslog. Will be appended to "maas" in the form + "maas.". If None, the syslog tag will simply be + "maas". syslog_tag is also used to name the logger with the + Python logging module; loggers will be named "maas." + unless syslog_tag is None. + """ + if syslog_tag is None: + logger_name = "maas" + else: + logger_name = "maas.%s" % syslog_tag + + maaslog = logging.getLogger(logger_name) + # This line is pure filth, but it allows us to return MAASLoggers + # for any logger constructed by this function, whilst leaving all + # other loggers to be the domain of the logging package. + maaslog.__class__ = MAASLogger + + return maaslog + + +def configure_root_logger(): + # Configure the "root" handler. This is the only place where we need to + # add the syslog handler and configure levels and formatting; sub-handlers + # propagate up to this handler. + root = get_maas_logger() + if len(root.handlers) == 0: + # It has not yet been configured. 
+ handler = SysLogHandler("/dev/log") + handler.setFormatter(logging.Formatter( + "%(name)s: [%(levelname)s] %(message)s")) + root.addHandler(handler) + root.setLevel(logging.INFO) + return root + + +configure_root_logger() diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/logger/tests/test_logger.py maas-1.7.6+bzr3376/src/provisioningserver/logger/tests/test_logger.py --- maas-1.5.4+bzr2294/src/provisioningserver/logger/tests/test_logger.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/logger/tests/test_logger.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,129 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for log.py""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from itertools import imap +import logging +import logging.handlers + +from maastesting.factory import factory +from provisioningserver.logger import log +from provisioningserver.logger.log import ( + get_maas_logger, + MAASLogger, + ) +from provisioningserver.testing.testcase import PservTestCase +from testtools.matchers import ( + HasLength, + IsInstance, + ) + + +class TestGetMAASLogger(PservTestCase): + + def test_root_logger_logs_to_syslog(self): + root_logger = get_maas_logger() + self.assertThat(root_logger.handlers, HasLength(1)) + [handler] = root_logger.handlers + self.assertThat(handler, IsInstance(logging.handlers.SysLogHandler)) + + def test_root_logger_defaults_to_info(self): + root_logger = get_maas_logger() + self.assertEqual(logging.INFO, root_logger.level) + + def test_does_not_log_twice(self): + maas_logger = get_maas_logger() + maas_foo_logger = get_maas_logger("foo") + + all_handlers = [] + # In previous versions of get_maas_logger(), the all_handlers list + # would end up containing two handlers, because a new SysLogHandler + # was added 
to each logger. This means that logging to the "maas.foo" + # logger would emit a message to syslog via its handler, then the log + # record would be propagated up to the "maas" logger (which we're + # calling the root logger in this context) where its handler would + # then emit another message to syslog. + all_handlers.extend(maas_logger.handlers) + all_handlers.extend(maas_foo_logger.handlers) + self.expectThat(all_handlers, HasLength(1)) + + # Intercept calls to `emit` on each handler above. + log_records = [] + for handler in all_handlers: + self.patch(handler, "emit", log_records.append) + + maas_foo_logger.info("A message from the Mekon") + + self.assertThat(log_records, HasLength(1)) + + def test_sets_custom_formatting(self): + logger = get_maas_logger("foo.bar") + [handler] = get_maas_logger().handlers + log_records = [] + self.patch(handler, "emit", log_records.append) + + robot_name = factory.make_name("Robot") + logger.info("Hello there %s!", robot_name) + + self.assertEqual( + "maas.foo.bar: [INFO] Hello there %s!" 
% robot_name, + "\n---\n".join(imap(handler.format, log_records))) + + def test_sets_logger_name(self): + self.patch(log, 'SysLogHandler') + self.patch(logging, 'Formatter') + name = factory.make_string() + maaslog = get_maas_logger(name) + self.assertEqual("maas.%s" % name, maaslog.name) + + def test_returns_same_logger_if_called_twice(self): + self.patch(log, 'SysLogHandler') + self.patch(logging, 'Formatter') + name = factory.make_string() + maaslog = get_maas_logger(name) + maaslog_2 = get_maas_logger(name) + self.assertIs(maaslog, maaslog_2) + + def test_exception_calls_disallowed(self): + self.patch(log, 'SysLogHandler') + self.patch(logging, 'Formatter') + name = factory.make_string() + maaslog = get_maas_logger(name) + self.assertRaises( + NotImplementedError, maaslog.exception, + factory.make_string()) + + def test_returns_MAASLogger_instances(self): + self.patch(log, 'SysLogHandler') + self.patch(logging, 'Formatter') + name = factory.make_string() + maaslog = get_maas_logger(name) + self.assertIsInstance(maaslog, MAASLogger) + + def test_doesnt_affect_general_logger_class(self): + self.patch(log, 'SysLogHandler') + self.patch(logging, 'Formatter') + name = factory.make_string() + get_maas_logger(name) + self.assertIsNot( + MAASLogger, logging.getLoggerClass()) + + def test_general_logger_class_accepts_exceptions(self): + self.patch(log, 'SysLogHandler') + self.patch(logging, 'Formatter') + name = factory.make_string() + get_maas_logger(name) + other_logger = logging.getLogger() + self.assertIsNone(other_logger.exception(factory.make_string())) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/logger/utils.py maas-1.7.6+bzr3376/src/provisioningserver/logger/utils.py --- maas-1.5.4+bzr2294/src/provisioningserver/logger/utils.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/logger/utils.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,47 @@ +# Copyright 2014 Canonical Ltd. 
This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Utilities for logging.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "log_call", +] + +from functools import wraps +import logging + +from provisioningserver.logger.log import get_maas_logger + + +maaslog = get_maas_logger("calls") + + +def log_call(level=logging.INFO): + """Log to the maaslog that something happened with a task. + + :param event: The event that we want to log. + :param task_name: The name of the task. + :**kwargs: A dict of args passed to the task. + """ + def _decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + arg_string = "%s %s" % (args, kwargs) + maaslog.log( + level, "Starting task '%s' with args: %s" % + (func.__name__, arg_string)) + func(*args, **kwargs) + maaslog.log( + level, "Finished task '%s' with args: %s" % + (func.__name__, arg_string)) + return wrapper + return _decorator diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/__main__.py maas-1.7.6+bzr3376/src/provisioningserver/__main__.py --- maas-1.5.4+bzr2294/src/provisioningserver/__main__.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/__main__.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,13 +14,14 @@ __metaclass__ = type +from provisioningserver import security import provisioningserver.boot.install_bootloader import provisioningserver.boot.install_grub +import provisioningserver.configure_maas_url import provisioningserver.customize_config import provisioningserver.dhcp.writer -import provisioningserver.start_cluster_controller import provisioningserver.upgrade_cluster -from provisioningserver.utils import ( +from provisioningserver.utils.script import ( AtomicWriteScript, MainScript, ) @@ -28,10 +29,12 @@ script_commands = { 'atomic-write': AtomicWriteScript, + 'check-for-shared-secret': 
security.CheckForSharedSecretScript, + 'configure-maas-url': provisioningserver.configure_maas_url, 'customize-config': provisioningserver.customize_config, 'generate-dhcp-config': provisioningserver.dhcp.writer, + 'install-shared-secret': security.InstallSharedSecretScript, 'install-uefi-config': provisioningserver.boot.install_grub, - 'start-cluster-controller': provisioningserver.start_cluster_controller, 'upgrade-cluster': provisioningserver.upgrade_cluster, } diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/network.py maas-1.7.6+bzr3376/src/provisioningserver/network.py --- maas-1.5.4+bzr2294/src/provisioningserver/network.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/network.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Discover networks attached to this cluster controller. 
@@ -21,29 +21,54 @@ ] from itertools import chain +from operator import attrgetter +from netaddr import ( + IPAddress, + IPNetwork, + ) from netifaces import ( AF_INET, + AF_INET6, ifaddresses, interfaces, ) +from provisioningserver.utils.network import clean_up_netifaces_address -class InterfaceInfo: - """The details of a network interface we are interested in.""" +class AttachedNetwork: + """A network as found attached to a network interface.""" def __init__(self, interface, ip=None, subnet_mask=None): self.interface = interface self.ip = ip self.subnet_mask = subnet_mask - def may_be_subnet(self): - """Could this be a subnet that MAAS is interested in?""" - return all([ - self.interface != 'lo', - self.ip is not None, - self.subnet_mask is not None, - ]) + @classmethod + def from_address(cls, interface_name, address): + """Construct `AttachedNetwork` from address as found by `netifaces`.""" + addr = address.get('addr') + if addr is not None: + addr = clean_up_netifaces_address(addr, interface_name) + return cls(interface_name, ip=addr, subnet_mask=address.get('netmask')) + + def is_relevant(self): + """Could this be a network that MAAS is interested in?""" + if self.interface == 'lo': + # Loopback device. Not useful for nodes. + return False + if self.ip is None: + # Interface has no address. Not usable. + return False + ipaddress = IPAddress(self.ip) + if ipaddress.version == 4 and not self.subnet_mask: + # IPv4 network has no broadcast address configured. Not usable. + return False + if ipaddress.is_link_local(): + # Link-local address. MAAS doesn't know how to manage these. + return False + # Met all these requirements? Then this is a relevant network. + return True def as_dict(self): """Return information as a dictionary. 
@@ -57,26 +82,58 @@ 'subnet_mask': self.subnet_mask, } + def get_ip_network(self): + """Return `IPNetwork` for this network.""" + return IPNetwork('%s/%s' % (self.ip, self.subnet_mask)).cidr + + +# Feature flag: reveal IPv6 capabilities to the user? +# +# While this is set to False, MAAS will not auto-detect IPv6 networks. +REVEAL_IPv6 = True + def get_interface_info(interface): - """Return a list of `InterfaceInfo` for the named `interface`.""" - addresses = ifaddresses(interface).get(AF_INET) - if addresses is None: - return [] + """Return a list of `AttachedNetwork` for the named `interface`.""" + ipv4_addrs = ifaddresses(interface).get(AF_INET, []) + if REVEAL_IPv6: + ipv6_addrs = ifaddresses(interface).get(AF_INET6, []) else: - return [ - InterfaceInfo( - interface, ip=address.get('addr'), - subnet_mask=address.get('netmask')) - for address in addresses] + ipv6_addrs = [] + return [ + AttachedNetwork.from_address(interface, address) + for address in ipv4_addrs + ipv6_addrs + ] + + +def filter_unique_networks(networks): + """Return only distinct networks out of `networks`. + + If two entries are on the same network (even if the entries' IP addresses + differ), only one of them will be returned. + + :param networks: Iterable of `AttachedNetwork` that pass the + `is_relevant` test. + :return: List of `AttachedNetwork`. + """ + known_ip_networks = set() + unique_networks = [] + for network in sorted(networks, key=attrgetter('ip')): + ip_network = network.get_ip_network() + if ip_network not in known_ip_networks: + unique_networks.append(network) + known_ip_networks.add(ip_network) + return unique_networks def discover_networks(): - """Find the networks attached to this system.""" - infos = chain.from_iterable( + """Find the networks attached to this system. + + :return: A list of dicts, each containing keys `interface`, `ip`, and + `subnet_mask`. 
+ """ + networks = chain.from_iterable( get_interface_info(interface) for interface in interfaces()) - return [ - info.as_dict() - for info in infos - if info.may_be_subnet() - ] + networks = [network for network in networks if network.is_relevant()] + networks = filter_unique_networks(networks) + return [network.as_dict() for network in networks] diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/omshell.py maas-1.7.6+bzr3376/src/provisioningserver/omshell.py --- maas-1.5.4+bzr2294/src/provisioningserver/omshell.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/omshell.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,192 +0,0 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Python wrapper around the `omshell` utility which amends objects -inside the DHCP server. -""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - "generate_omapi_key", - "Omshell", - ] - -import os -import re -from subprocess import ( - PIPE, - Popen, - ) -from textwrap import dedent - -from provisioningserver.utils import ( - call_capture_and_check, - ExternalProcessError, - parse_key_value_file, - tempdir, - ) - - -bad_key_pattern = re.compile("[+/]no|no[+/]", flags=re.IGNORECASE) - - -def call_dnssec_keygen(tmpdir): - path = os.environ.get("PATH", "").split(os.pathsep) - path.append("/usr/sbin") - env = dict(os.environ, PATH=os.pathsep.join(path)) - return call_capture_and_check( - ['dnssec-keygen', '-r', '/dev/urandom', '-a', 'HMAC-MD5', - '-b', '512', '-n', 'HOST', '-K', tmpdir, '-q', 'omapi_key'], - env=env) - - -def run_repeated_keygen(tmpdir): - # omshell has a bug where if the chars '/' or '+' appear either - # side of the word 'no' (in any case), it throws an error like - # "partial base64 value left over". 
We check for that here and - # repeatedly generate a new key until a good one is generated. - - key = None - while key is None: - key_id = call_dnssec_keygen(tmpdir) - - # Locate the file that was written and strip out the Key: field in - # it. - if not key_id: - raise AssertionError("dnssec-keygen didn't generate anything") - key_id = key_id.strip() # Remove trailing newline. - key_file_name = os.path.join(tmpdir, key_id + '.private') - parsing_error = False - try: - config = parse_key_value_file(key_file_name) - except ValueError: - parsing_error = True - if parsing_error or 'Key' not in config: - raise AssertionError( - "Key field not found in output from dnssec-keygen") - - key = config['Key'] - if bad_key_pattern.search(key) is not None: - # Force a retry. - os.remove(key_file_name) # Stop dnssec_keygen complaints. - key = None - - return key - - -def generate_omapi_key(): - """Generate a HMAC-MD5 key by calling out to the dnssec-keygen tool. - - :return: The shared key suitable for OMAPI access. - :type: string - """ - # dnssec-keygen writes out files to a specified directory, so we - # need to make a temp directory for that. - # This relies on the temporary directory being accessible only to its - # owner. - temp_prefix = "%s." % os.path.basename(__file__) - with tempdir(prefix=temp_prefix) as tmpdir: - key = run_repeated_keygen(tmpdir) - return key - - -class Omshell: - """Wrap up the omshell utility in Python. - - 'omshell' is an external executable that communicates with a DHCP daemon - and manipulates its objects. This class wraps up the commands necessary - to add and remove host maps (MAC to IP). 
- - :param server_address: The address for the DHCP server (ip or hostname) - :param shared_key: An HMAC-MD5 key generated by dnssec-keygen like: - $ dnssec-keygen -r /dev/urandom -a HMAC-MD5 -b 512 -n HOST omapi_key - $ cat Komapi_key.+*.private |grep ^Key|cut -d ' ' -f2- - It must match the key set in the DHCP server's config which looks - like this: - - omapi-port 7911; - key omapi_key { - algorithm HMAC-MD5; - secret "XXXXXXXXX"; #<-The output from the generated key above. - }; - omapi-key omapi_key; - """ - - def __init__(self, server_address, shared_key): - self.server_address = server_address - self.shared_key = shared_key - self.command = ["omshell"] - - def _run(self, stdin): - proc = Popen(self.command, stdin=PIPE, stdout=PIPE) - stdout, stderr = proc.communicate(stdin) - if proc.poll() != 0: - raise ExternalProcessError(proc.returncode, self.command, stdout) - return proc.returncode, stdout - - def create(self, ip_address, mac_address): - # The "name" is not a host name; it's an identifier used within - # the DHCP server. We just happen to use the IP address. - stdin = dedent("""\ - server {self.server_address} - key omapi_key {self.shared_key} - connect - new host - set ip-address = {ip_address} - set hardware-address = {mac_address} - set hardware-type = 1 - set name = "{ip_address}" - create - """) - stdin = stdin.format( - self=self, ip_address=ip_address, mac_address=mac_address) - - returncode, output = self._run(stdin) - # If the call to omshell doesn't result in output containing the - # magic string 'hardware-type' then we can be reasonably sure - # that the 'create' command failed. Unfortunately there's no - # other output like "successful" to check so this is the best we - # can do. - if "hardware-type" in output: - # Success. - pass - elif "can't open object: I/O error" in output: - # Host map already existed. Treat as success. 
- pass - else: - raise ExternalProcessError(returncode, self.command, output) - - def remove(self, ip_address): - # The "name" is not a host name; it's an identifier used within - # the DHCP server. We just happen to use the IP address. - stdin = dedent("""\ - server {self.server_address} - key omapi_key {self.shared_key} - connect - new host - set name = "{ip_address}" - open - remove - """) - stdin = stdin.format( - self=self, ip_address=ip_address) - - returncode, output = self._run(stdin) - - # If the omshell worked, the last line should reference a null - # object. - lines = output.strip().splitlines() - try: - last_line = lines[-1] - except IndexError: - last_line = "" - if last_line != "obj: ": - raise ExternalProcessError(returncode, self.command, output) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/path.py maas-1.7.6+bzr3376/src/provisioningserver/path.py --- maas-1.5.4+bzr2294/src/provisioningserver/path.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/path.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,45 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Compute paths relative to root.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'get_path', + ] + +from os import getenv +import os.path + + +def get_path(*path_elements): + """Return an absolute path based on the `MAAS_ROOT` environment variable. + + Use this to compute paths like `/var/lib/maas/gnupg`, so that development + environments can redirect them to a playground location. For example, if + `MAAS_ROOT` is set to `/tmp/maasroot`, then `get_path()` will return + `/tmp/maasroot` and `get_path('/var/lib/maas')` returns + `/tmp/maasroot/var/lib/maas`. 
If `MAAS_ROOT` is not set, you just get (a + normalised version of) the location you passed in; just `get_path()` will + always return the root directory. + + This call may have minor side effects: it reads environment variables and + the current working directory. Side effects during imports are bad, so + avoid using this in global variables. Instead of exporting a variable + that holds your path, export a getter function that returns your path. + Add caching if it becomes a performance problem. + """ + maas_root = getenv('MAAS_ROOT', '/') + # Strip off a leading slash, if any. If left in, it would override any + # preceding path elements. The MAAS_ROOT would be ignored. + # The dot is there to make the call work even with zero path elements. + path = os.path.join('.', *path_elements).lstrip('/') + return os.path.abspath(os.path.join(maas_root, path)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/plugin.py maas-1.7.6+bzr3376/src/provisioningserver/plugin.py --- maas-1.5.4+bzr2294/src/provisioningserver/plugin.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/plugin.py 2015-07-10 01:27:14.000000000 +0000 @@ -12,23 +12,35 @@ str = None __metaclass__ = type -__all__ = [] +__all__ = [ + "LogService", + "ProvisioningServiceMaker", +] -from provisioningserver.amqpclient import AMQFactory +import signal +import sys + +import provisioningserver +from provisioningserver.cluster_config import get_cluster_uuid from provisioningserver.config import Config -from provisioningserver.rpc.clusterservice import ClusterClientService -from provisioningserver.services import ( - LogService, - OOPSService, - ) -from provisioningserver.tftp import TFTPService -from twisted.application.internet import ( - TCPClient, - TCPServer, +from provisioningserver.pserv_services.dhcp_probe_service import ( + DHCPProbeService, + ) +from provisioningserver.pserv_services.image_download_service import ( + ImageDownloadService, ) +from 
provisioningserver.pserv_services.lease_upload_service import ( + LeaseUploadService, + ) +from provisioningserver.pserv_services.node_power_monitor_service import ( + NodePowerMonitorService, + ) +from provisioningserver.pserv_services.tftp import TFTPService +from provisioningserver.rpc.clusterservice import ClusterClientService +from twisted.application.internet import TCPServer from twisted.application.service import ( IServiceMaker, - MultiService, + Service, ) from twisted.cred.checkers import ICredentialsChecker from twisted.cred.credentials import IUsernamePassword @@ -40,10 +52,9 @@ returnValue, ) from twisted.plugin import IPlugin -from twisted.python import ( - log, - usage, - ) +from twisted.python import usage +from twisted.python.log import FileLogObserver +from twisted.python.logfile import LogFile from twisted.web.resource import ( IResource, Resource, @@ -90,6 +101,46 @@ raise NotImplementedError() +class LogService(Service): + + name = "log" + + def __init__(self, filename): + self.filename = filename + self.logfile = None + self.observer = None + + def _signal_handler(self, sig, frame): + reactor.callFromThread(self.logfile.reopen) + + def startService(self): + Service.startService(self) + if self.filename != '-': + self.logfile = LogFile.fromFullPath( + self.filename, rotateLength=None, defaultMode=0o644) + self.__previous_signal_handler = signal.signal( + signal.SIGUSR1, self._signal_handler) + else: + self.logfile = sys.stdout + self.observer = FileLogObserver(self.logfile) + self.observer.start() + + def stopService(self): + Service.stopService(self) + if self.filename != '-': + signal.signal(signal.SIGUSR1, self.__previous_signal_handler) + del self.__previous_signal_handler + self.observer.stop() + self.observer = None + self.logfile.close() + self.logfile = None + else: + self.observer.stop() + self.observer = None + # Don't close stdout. 
+ self.logfile = None + + class Options(usage.Options): """Command line options for the provisioning server.""" @@ -99,7 +150,7 @@ @implementer(IServiceMaker, IPlugin) -class ProvisioningServiceMaker(object): +class ProvisioningServiceMaker: """Create a service for the Twisted plugin.""" options = Options @@ -112,12 +163,6 @@ """Create the log service.""" return LogService(config["logfile"]) - def _makeOopsService(self, log_service, oops_config): - """Create the oops service.""" - oops_dir = oops_config["directory"] - oops_reporter = oops_config["reporter"] - return OOPSService(log_service, oops_dir, oops_reporter) - def _makeSiteService(self, papi_xmlrpc, config): """Create the site service.""" site_root = Resource() @@ -129,26 +174,6 @@ site_service.setName("site") return site_service - def _makeBroker(self, broker_config): - """Create the messaging broker.""" - broker_port = broker_config["port"] - broker_host = broker_config["host"] - broker_username = broker_config["username"] - broker_password = broker_config["password"] - broker_vhost = broker_config["vhost"] - - cb_connected = lambda ignored: None # TODO - cb_disconnected = lambda ignored: None # TODO - cb_failed = lambda connector_and_reason: ( - log.err(connector_and_reason[1], "Connection failed")) - client_factory = AMQFactory( - broker_username, broker_password, broker_vhost, - cb_connected, cb_disconnected, cb_failed) - client_service = TCPClient( - broker_host, broker_port, client_factory) - client_service.setName("amqp") - return client_service - def _makeTFTPService(self, tftp_config): """Create the dynamic TFTP service.""" tftp_service = TFTPService( @@ -157,33 +182,58 @@ tftp_service.setName("tftp") return tftp_service + def _makeImageDownloadService(self, rpc_service): + image_download_service = ImageDownloadService( + rpc_service, reactor, get_cluster_uuid()) + image_download_service.setName("image_download") + return image_download_service + + def _makeLeaseUploadService(self, rpc_service): + 
lease_upload_service = LeaseUploadService( + rpc_service, reactor, get_cluster_uuid()) + lease_upload_service.setName("lease_upload") + return lease_upload_service + + def _makeNodePowerMonitorService(self): + node_monitor = NodePowerMonitorService(get_cluster_uuid(), reactor) + node_monitor.setName("node_monitor") + return node_monitor + def _makeRPCService(self, rpc_config): rpc_service = ClusterClientService(reactor) rpc_service.setName("rpc") return rpc_service + def _makeDHCPProbeService(self, rpc_service): + dhcp_probe_service = DHCPProbeService( + rpc_service, reactor, get_cluster_uuid()) + dhcp_probe_service.setName("dhcp_probe") + return dhcp_probe_service + def makeService(self, options): """Construct a service.""" - services = MultiService() + services = provisioningserver.services config = Config.load(options["config-file"]) log_service = self._makeLogService(config) log_service.setServiceParent(services) - oops_service = self._makeOopsService(log_service, config["oops"]) - oops_service.setServiceParent(services) - - broker_config = config["broker"] - # Connecting to RabbitMQ is not yet a required component of a running - # MAAS installation; skip unless the password has been set explicitly. 
- if broker_config["password"] != b"test": - client_service = self._makeBroker(broker_config) - client_service.setServiceParent(services) - tftp_service = self._makeTFTPService(config["tftp"]) tftp_service.setServiceParent(services) rpc_service = self._makeRPCService(config["rpc"]) rpc_service.setServiceParent(services) + node_monitor = self._makeNodePowerMonitorService() + node_monitor.setServiceParent(services) + + image_download_service = self._makeImageDownloadService(rpc_service) + image_download_service.setServiceParent(services) + + dhcp_probe_service = self._makeDHCPProbeService(rpc_service) + dhcp_probe_service.setServiceParent(services) + + lease_upload_service = self._makeLeaseUploadService(rpc_service) + lease_upload_service.setServiceParent(services) + return services diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/power/poweraction.py maas-1.7.6+bzr3376/src/provisioningserver/power/poweraction.py --- maas-1.5.4+bzr2294/src/provisioningserver/power/poweraction.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/power/poweraction.py 2015-07-10 01:27:14.000000000 +0000 @@ -22,11 +22,12 @@ import os import subprocess -from celery.app import app_or_default from provisioningserver.utils import ( + escape_py_literal, locate_config, ShellTemplate, ) +from provisioningserver.utils.network import find_ip_via_arp class UnknownPowerType(Exception): @@ -36,27 +37,21 @@ class PowerActionFail(Exception): """Raised when there's a problem executing a power script.""" - def __init__(self, power_action, err): - self.power_action = power_action - self.err = err - - def __str__(self): - message = "%s failed: %s" % (self.power_action.power_type, self.err) - is_process_error = isinstance(self.err, subprocess.CalledProcessError) - if is_process_error and self.err.output: - # Add error output to the message. 
- message += ":\n" + self.err.output.strip() - return message - - -def get_power_templates_dir(): - """Get the power-templates directory from the config.""" - return app_or_default().conf.POWER_TEMPLATES_DIR - - -def get_power_config_dir(): - """Get the power-config directory from the config.""" - return app_or_default().conf.POWER_CONFIG_DIR + @classmethod + def from_action(cls, power_action, err): + message = "%s failed" % power_action.power_type + is_process_error = isinstance(err, subprocess.CalledProcessError) + # If the failure is a CalledProcessError, be careful not to call + # its __str__ as this will include the actual template text + # (which is the 'command' that was run). + if is_process_error: + message += " with return code %s" % err.returncode + if err.output: + message += ":\n" + ( + err.output.decode("utf-8", "replace").strip()) + else: + message += ":\n%s" % err + return cls(message) class PowerAction: @@ -71,63 +66,77 @@ def __init__(self, power_type): self.path = os.path.join( - self.template_basedir, power_type + ".template") + self.get_template_basedir(), power_type + ".template") if not os.path.exists(self.path): raise UnknownPowerType(power_type) self.power_type = power_type - @property - def template_basedir(self): + def get_template_basedir(self): """Directory where power templates are stored.""" - return get_power_templates_dir() or locate_config('templates/power') + return locate_config('templates/power') - @property - def config_basedir(self): + def get_config_basedir(self): """Directory where power config are stored.""" # By default, power config lives in the same directory as power # templates. This makes it easy to customize them together. 
- return get_power_config_dir() or locate_config('templates/power') + return locate_config('templates/power') def get_template(self): with open(self.path, "rb") as f: return ShellTemplate(f.read(), name=self.path) - def get_extra_context(self): - """Extra context used when rending the power templates.""" - return { - 'config_dir': self.config_basedir, - } + def update_context(self, context): + """Add and manipulate `context` as necessary.""" + context['config_dir'] = self.get_config_basedir() + context['escape_py_literal'] = escape_py_literal + if 'mac_address' in context: + mac_address = context['mac_address'] + ip_address = find_ip_via_arp(mac_address) + context['ip_address'] = ip_address + else: + context.setdefault('ip_address', None) + return context - def render_template(self, template, **kwargs): + def render_template(self, template, context): try: - kwargs.update(self.get_extra_context()) - return template.substitute(kwargs) + return template.substitute(context) except NameError as error: - raise PowerActionFail(self, error) + raise PowerActionFail.from_action(self, error) def run_shell(self, commands): """Execute raw shell script (as rendered from a template). :param commands: String containing shell script. + :return: Standard output and standard error returned by the execution + of the shell script. :raises: :class:`PowerActionFail` """ # This might need retrying but it could be better to leave that # to the individual scripts. 
- try: - output = subprocess.check_output( - commands, shell=True, stderr=subprocess.STDOUT, close_fds=True) - except subprocess.CalledProcessError as e: - raise PowerActionFail(self, e) - # This output is only examined in tests, execute just ignores it - return output + shell = ("/bin/sh",) + process = subprocess.Popen( + shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, close_fds=True) + output, _ = process.communicate(commands) + if process.wait() == 0: + return output.strip() + else: + raise PowerActionFail.from_action( + self, subprocess.CalledProcessError( + process.returncode, shell, output)) - def execute(self, **kwargs): + def execute(self, **context): """Execute the template. + :return: Standard output and standard error returned by the execution + of the template. + Any supplied parameters will be passed to the template as substitution values. """ template = self.get_template() - rendered = self.render_template(template, **kwargs) - self.run_shell(rendered) + context = self.update_context(context) + rendered = self.render_template( + template=template, context=context) + return self.run_shell(rendered) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/power/tests/test_poweraction.py maas-1.7.6+bzr3376/src/provisioningserver/power/tests/test_poweraction.py --- maas-1.5.4+bzr2294/src/provisioningserver/power/tests/test_poweraction.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/power/tests/test_poweraction.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.power`. 
@@ -19,8 +19,12 @@ import re from maastesting.factory import factory +from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase -from mock import Mock +from mock import ( + ANY, + sentinel, + ) import provisioningserver.power.poweraction from provisioningserver.power.poweraction import ( PowerAction, @@ -28,6 +32,7 @@ UnknownPowerType, ) from provisioningserver.utils import ( + escape_py_literal, locate_config, ShellTemplate, ) @@ -41,11 +46,9 @@ class TestPowerAction(MAASTestCase): """Tests for PowerAction.""" - def configure_templates_dir(self, path=None): + def configure_templates_dir(self, path): """Configure POWER_TEMPLATES_DIR to `path`.""" - self.patch( - provisioningserver.power.poweraction, 'get_power_templates_dir', - Mock(return_value=path)) + self.patch(PowerAction, 'get_template_basedir').return_value = path def test_init_raises_for_unknown_powertype(self): powertype = factory.make_name("powertype", sep='') @@ -58,18 +61,18 @@ self.assertEqual('ether_wake', pa.power_type) def test_init_stores_template_path(self): - self.configure_templates_dir() power_type = 'ether_wake' pa = PowerAction(power_type) - path = os.path.join(pa.template_basedir, power_type + ".template") + path = os.path.join( + pa.get_template_basedir(), + power_type + ".template") self.assertEqual(path, pa.path) def test_template_basedir_defaults_to_config_dir(self): - self.configure_templates_dir() power_type = 'ether_wake' self.assertEqual( locate_config('templates/power'), - PowerAction(power_type).template_basedir) + PowerAction(power_type).get_template_basedir()) def test_template_basedir_prefers_configured_value(self): power_type = 'ether_wake' @@ -79,17 +82,16 @@ self.configure_templates_dir(template_dir) self.assertEqual( template_dir, - PowerAction('ether_wake').template_basedir) + PowerAction('ether_wake').get_template_basedir()) def test_get_template_retrieves_template(self): - self.configure_templates_dir() pa = 
PowerAction('ether_wake') template = pa.get_template() self.assertIsInstance(template, ShellTemplate) self.assertThat(pa.path, FileContains(template.content)) def test_get_template_looks_for_template_in_template_basedir(self): - contents = factory.getRandomString() + contents = factory.make_string() power_type = 'ether_wake' template_name = '%s.template' % power_type template = self.make_file(name=template_name, contents=contents) @@ -103,20 +105,22 @@ # its variables. pa = PowerAction('ether_wake') template = ShellTemplate("template: {{mac}}") - rendered = pa.render_template(template, mac="mymac") + rendered = pa.render_template( + template, pa.update_context({"mac": "mymac"})) self.assertEqual("template: mymac", rendered) def test_render_template_raises_PowerActionFail(self): # If not enough arguments are supplied to fill in template # variables then a PowerActionFail is raised. pa = PowerAction('ether_wake') - template_name = factory.getRandomString() + template_name = factory.make_string() template = ShellTemplate("template: {{mac}}", name=template_name) self.assertThat( - lambda: pa.render_template(template), + lambda: pa.render_template(template, pa.update_context({})), Raises(MatchesException( PowerActionFail, - ".*name 'mac' is not defined at line \d+ column \d+ " + "ether_wake failed:\n" + "name 'mac' is not defined at line \d+ column \d+ " "in file %s" % re.escape(template_name)))) def _create_template_file(self, template): @@ -126,7 +130,7 @@ def run_action(self, path, **kwargs): pa = PowerAction('ether_wake') pa.path = path - pa.execute(**kwargs) + return pa.execute(**kwargs) def test_execute(self): # execute() should run the template through a shell. 
@@ -138,12 +142,19 @@ self.run_action(path, mac="test", outfile=output_file) self.assertThat(output_file, FileContains("working test\n")) + def test_execute_return_execution_result(self): + template = "echo ' test \n'" + path = self._create_template_file(template) + output = self.run_action(path) + # run_action() returns the 'stripped' output. + self.assertEqual('test', output) + def test_execute_raises_PowerActionFail_when_script_fails(self): path = self._create_template_file("this_is_not_valid_shell") self.assertThat( lambda: self.run_action(path), Raises(MatchesException( - PowerActionFail, "ether_wake failed.* return.* 127"))) + PowerActionFail, "ether_wake failed with return code 127"))) def test_execute_raises_PowerActionFail_with_output(self): path = self._create_template_file("echo reason for failure; exit 1") @@ -156,7 +167,7 @@ pa = PowerAction('ether_wake') self.assertRaises( PowerActionFail, - pa.execute, power_change='off', mac=factory.getRandomMACAddress()) + pa.execute, power_change='off', mac=factory.make_mac_address()) def test_fence_cdu_checks_state(self): # We can't test the fence_cdu template in detail (and it may be @@ -166,34 +177,38 @@ # line. It will complain about this and fail. 
action = PowerAction("fence_cdu") script = action.render_template( - action.get_template(), power_change='on', - power_address='mysystem', power_id='system', - power_user='me', power_pass='me', fence_cdu='echo') + action.get_template(), + action.update_context(dict( + power_change='on', power_address='mysystem', + power_id='system', power_user='me', power_pass='me', + fence_cdu='echo')), + ) output = action.run_shell(script) self.assertIn("Got unknown power state from fence_cdu", output) - def configure_power_config_dir(self, path=None): + def configure_power_config_dir(self, path): """Configure POWER_CONFIG_DIR to `path`.""" - self.patch( - provisioningserver.power.poweraction, 'get_power_config_dir', - Mock(return_value=path)) + self.patch(PowerAction, 'get_config_basedir').return_value = path def test_config_basedir_defaults_to_local_dir(self): - self.configure_power_config_dir() power_type = 'ether_wake' self.assertEqual( locate_config('templates/power'), - PowerAction(power_type).config_basedir) + PowerAction(power_type).get_config_basedir()) def test_ipmi_script_includes_config_dir(self): conf_dir = factory.make_name('power_config_dir') self.configure_power_config_dir(conf_dir) action = PowerAction('ipmi') script = action.render_template( - action.get_template(), power_change='on', - power_address='mystystem', power_user='me', power_pass='me', - ipmipower='echo', ipmi_chassis_config='echo', config_dir='dir', - ipmi_config='file.conf', power_driver='LAN', ip_address='') + action.get_template(), + action.update_context(dict( + power_change='on', power_address='mystystem', + power_user='me', power_pass='me', ipmipower='echo', + ipmi_chassis_config='echo', config_dir='dir', + ipmi_config='file.conf', power_driver='LAN', + ip_address='', power_off_mode='hard')), + ) self.assertIn(conf_dir, script) def test_moonshot_checks_state(self): @@ -204,9 +219,12 @@ # line. It will complain about this and fail. 
action = PowerAction("moonshot") script = action.render_template( - action.get_template(), power_change='on', - power_address='mysystem', power_user='me', - power_pass='me', power_hwaddress='me', ipmitool='echo') + action.get_template(), + action.update_context(dict( + power_change='on', power_address='mysystem', + power_user='me', power_pass='me', power_hwaddress='me', + ipmitool='echo')), + ) output = action.run_shell(script) self.assertIn("Got unknown power state from ipmipower", output) @@ -216,9 +234,11 @@ # rendering namespace so I passed on that. action = PowerAction('ucsm') script = action.render_template( - action.get_template(), power_address='foo', - power_user='bar', power_pass='baz', - uuid=factory.getRandomUUID(), power_change='on') + action.get_template(), + action.update_context(dict( + power_address='foo', power_user='bar', power_pass='baz', + uuid=factory.make_UUID(), power_change='on')), + ) self.assertIn('power_control_ucsm', script) def test_mscm_renders_template(self): @@ -227,7 +247,69 @@ # rendering namespace so I passed on that. 
action = PowerAction('mscm') script = action.render_template( - action.get_template(), power_address='foo', - power_user='bar', power_pass='baz', - node_id='c1n1', power_change='on') + action.get_template(), + action.update_context(dict( + power_address='foo', power_user='bar', power_pass='baz', + node_id='c1n1', power_change='on')), + ) self.assertIn('power_control_mscm', script) + + +class TestTemplateContext(MAASTestCase): + + def make_stubbed_power_action(self): + power_action = PowerAction("ipmi") + render_template = self.patch(power_action, "render_template") + render_template.return_value = "echo done" + return power_action + + def test_basic_context(self): + power_action = self.make_stubbed_power_action() + result = power_action.execute() + self.assertEqual("done", result) + self.assertThat( + power_action.render_template, + MockCalledOnceWith( + template=ANY, + context=dict( + config_dir=locate_config("templates/power"), + escape_py_literal=escape_py_literal, ip_address=None, + ), + )) + + def test_ip_address_is_unmolested_if_set(self): + power_action = self.make_stubbed_power_action() + ip_address = factory.make_ipv6_address() + result = power_action.execute(ip_address=ip_address) + self.assertEqual("done", result) + self.assertThat( + power_action.render_template, + MockCalledOnceWith( + template=ANY, + context=dict( + config_dir=locate_config("templates/power"), + escape_py_literal=escape_py_literal, + ip_address=ip_address, + ), + )) + + def test_execute_looks_up_ip_address_from_mac_address(self): + find_ip_via_arp = self.patch( + provisioningserver.power.poweraction, "find_ip_via_arp") + find_ip_via_arp.return_value = sentinel.ip_address_from_mac + + power_action = self.make_stubbed_power_action() + mac_address = factory.make_mac_address() + result = power_action.execute(mac_address=mac_address) + self.assertEqual("done", result) + self.assertThat( + power_action.render_template, + MockCalledOnceWith( + template=ANY, + context=dict( + 
config_dir=locate_config("templates/power"), + escape_py_literal=escape_py_literal, + ip_address=sentinel.ip_address_from_mac, + mac_address=mac_address, + ), + )) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/power_schema.py maas-1.7.6+bzr3376/src/provisioningserver/power_schema.py --- maas-1.5.4+bzr2294/src/provisioningserver/power_schema.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/power_schema.py 2015-07-10 01:27:14.000000000 +0000 @@ -45,7 +45,7 @@ CHOICE_FIELD_SCHEMA = { 'type': 'array', 'items': { - 'title': "Power type paramter field choice", + 'title': "Power type parameter field choice", 'type': 'array', 'minItems': 2, 'maxItems': 2, @@ -164,7 +164,7 @@ }, { 'name': 'virsh', - 'description': 'virsh (virtual systems)', + 'description': 'Virsh (virtual systems)', 'fields': [ make_json_field('power_address', "Power address"), make_json_field('power_id', "Power ID"), @@ -195,8 +195,9 @@ make_json_field('power_pass', "Power password"), make_json_field( 'mac_address', - "MAC address - the IP is looked up with ARP and is used if " - "IP address is empty. This is better when the BMC uses DHCP."), + "Use ARP on this MAC address to discover the BMC's IP " + "address if its IP is likely to change, e.g. if it's " + "assigned via DHCP.") ], }, { @@ -231,8 +232,9 @@ make_json_field('power_pass', "Power password"), make_json_field( 'power_address', - "An IP address to use instead of the node's primary NIC's IP " - "(i.e. the IP of the MAC above, looked up with ARP)."), + "Use ARP on this MAC address to discover the AMT IP " + "address if its IP is likely to change, e.g. 
if it's " + "assigned via DHCP.") ], }, { diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/dhcp_probe_service.py maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/dhcp_probe_service.py --- maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/dhcp_probe_service.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/dhcp_probe_service.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,152 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +""" DHCP probing service.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "DHCPProbeService", + ] + + +from datetime import timedelta +import socket + +from provisioningserver.dhcp.detect import probe_interface +from provisioningserver.logger.log import get_maas_logger +from provisioningserver.rpc.exceptions import NoConnectionsAvailable +from provisioningserver.rpc.region import ( + GetClusterInterfaces, + ReportForeignDHCPServer, + ) +from provisioningserver.utils.twisted import ( + pause, + retries, + ) +from twisted.application.internet import TimerService +from twisted.internet.defer import ( + inlineCallbacks, + returnValue, + ) +from twisted.internet.threads import deferToThread +from twisted.protocols.amp import UnhandledCommand + + +maaslog = get_maas_logger("dhcp.probe") + + +class DHCPProbeService(TimerService, object): + """Service to probe for DHCP servers on this cluster's network. + + Built on top of Twisted's `TimerService`. + + :param reactor: An `IReactor` instance. + :param cluster_uuid: This cluster's UUID. + """ + + check_interval = timedelta(minutes=10).total_seconds() + + def __init__(self, client_service, reactor, cluster_uuid): + # Call self.try_probe_dhcp() every self.check_interval. 
+ super(DHCPProbeService, self).__init__( + self.check_interval, self.try_probe_dhcp) + self.clock = reactor + self.uuid = cluster_uuid + self.client_service = client_service + + @inlineCallbacks + def _get_cluster_interfaces(self, client): + """Return the interfaces for this cluster.""" + try: + response = yield client( + GetClusterInterfaces, cluster_uuid=self.uuid) + except UnhandledCommand: + # The region hasn't been upgraded to support this method + # yet, so give up. Returning an empty dict means that this + # run will end, since there are no interfaces to check. + maaslog.error( + "Unable to query region for interfaces: Region does not " + "support the GetClusterInterfaces RPC method.") + returnValue({}) + else: + returnValue(response['interfaces']) + + @inlineCallbacks + def _inform_region_of_foreign_dhcp(self, client, name, + foreign_dhcp_ip): + """Tell the region that there's a rogue DHCP server. + + :param client: The RPC client to use. + :param name: The name of the network interface where the rogue + DHCP server was found. + :param foreign_dhcp_ip: The IP address of the rogue server. + """ + try: + yield client( + ReportForeignDHCPServer, cluster_uuid=self.uuid, + interface_name=name, foreign_dhcp_ip=foreign_dhcp_ip) + except UnhandledCommand: + # Not a lot we can do here... The region doesn't support + # this method yet. + maaslog.error( + "Unable to inform region of rogue DHCP server: the region " + "does not yet support the ReportForeignDHCPServer RPC " + "method.") + + @inlineCallbacks + def probe_dhcp(self): + """Find all the interfaces on this cluster and probe for DHCP servers. 
+ """ + client = None + for elapsed, remaining, wait in retries(15, 5, self.clock): + try: + client = self.client_service.getClient() + break + except NoConnectionsAvailable: + yield pause(wait, self.clock) + else: + maaslog.error( + "Can't initiate DHCP probe, no RPC connection to region.") + return + + cluster_interfaces = yield self._get_cluster_interfaces(client) + # Iterate over interfaces and probe each one. + for interface in cluster_interfaces: + try: + servers = yield deferToThread( + probe_interface, interface['interface'], interface['ip']) + except socket.error: + maaslog.error( + "Failed to probe sockets; did you configure authbind as " + "per HACKING.txt?") + break + else: + if len(servers) > 0: + # Only send one, if it gets cleared out then the + # next detection pass will send a different one, if it + # still exists. + yield self._inform_region_of_foreign_dhcp( + client, interface['name'], servers.pop()) + else: + yield self._inform_region_of_foreign_dhcp( + client, interface['name'], None) + + @inlineCallbacks + def try_probe_dhcp(self): + maaslog.debug("Running periodic DHCP probe.") + try: + yield self.probe_dhcp() + except Exception as error: + maaslog.error( + "Unable to probe for rogue DHCP servers: %s", + unicode(error)) + else: + maaslog.debug("Finished periodic DHCP probe.") diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/image_download_service.py maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/image_download_service.py --- maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/image_download_service.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/image_download_service.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,135 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Service to periodically refresh the boot images.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "ImageDownloadService", + ] + + +from datetime import timedelta + +from provisioningserver.boot import tftppath +from provisioningserver.logger import get_maas_logger +from provisioningserver.rpc.boot_images import import_boot_images +from provisioningserver.rpc.exceptions import NoConnectionsAvailable +from provisioningserver.rpc.region import ( + GetBootSources, + GetBootSourcesV2, + GetProxies, + ) +from provisioningserver.utils.twisted import ( + pause, + retries, + ) +from twisted.application.internet import TimerService +from twisted.internet.defer import ( + inlineCallbacks, + returnValue, + ) +from twisted.python import log +from twisted.spread.pb import NoSuchMethod + + +maaslog = get_maas_logger("boot_image_download_service") + + +class ImageDownloadService(TimerService, object): + """Twisted service to periodically refresh ephemeral images. + + :param client_service: A `ClusterClientService` instance for talking + to the region controller. + :param reactor: An `IReactor` instance. + """ + + check_interval = timedelta(minutes=5).total_seconds() + + def __init__(self, client_service, reactor, cluster_uuid): + # Call self.check() every self.check_interval. + super(ImageDownloadService, self).__init__( + self.check_interval, self.try_download) + self.clock = reactor + self.client_service = client_service + self.uuid = cluster_uuid + + def try_download(self): + """Wrap download attempts in something that catches Failures. + + Log the full error to the Twisted log, and a concise error to + the maas log. 
+ """ + def download_failure(failure): + log.err(failure) + maaslog.error( + "Failed to download images: %s", failure.getErrorMessage()) + + return self.maybe_start_download().addErrback(download_failure) + + @inlineCallbacks + def _get_boot_sources(self, client): + """Gets the boot sources from the region.""" + try: + sources = yield client(GetBootSourcesV2, uuid=self.uuid) + except NoSuchMethod: + # Region has not been upgraded to support the new call, use the + # old call. The old call did not provide the new os selection + # parameter. Region does not support boot source selection by os, + # so its set too allow all operating systems. + sources = yield client(GetBootSources, uuid=self.uuid) + for source in sources['sources']: + for selection in source['selections']: + selection['os'] = '*' + returnValue(sources) + + @inlineCallbacks + def _start_download(self): + client = None + # Retry a few times, since this service usually comes up before + # the RPC service. + for elapsed, remaining, wait in retries(15, 5, self.clock): + try: + client = self.client_service.getClient() + break + except NoConnectionsAvailable: + yield pause(wait, self.clock) + else: + maaslog.error( + "Can't initiate image download, no RPC connection to region.") + return + + # Get sources from region + sources = yield self._get_boot_sources(client) + # Get http proxy from region + proxies = yield client(GetProxies) + + def get_proxy_url(scheme): + url = proxies.get(scheme) # url is a ParsedResult. + return None if url is None else url.geturl() + + yield import_boot_images( + sources.get("sources"), get_proxy_url("http"), + get_proxy_url("https")) + + @inlineCallbacks + def maybe_start_download(self): + """Check the time the last image refresh happened and initiate a new + one if older than 15 minutes. 
+ """ + last_modified = tftppath.maas_meta_last_modified() + if last_modified is None: + yield self._start_download() + return + + age_in_seconds = self.clock.seconds() - last_modified + if age_in_seconds >= timedelta(minutes=15).total_seconds(): + yield self._start_download() diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/lease_upload_service.py maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/lease_upload_service.py --- maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/lease_upload_service.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/lease_upload_service.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,130 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Twisted service that periodically uploads DHCP leases to the region.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "convert_leases_to_mappings", + "convert_mappings_to_leases", + "LeaseUploadService", + ] + + +from provisioningserver.dhcp.leases import ( + check_lease_changes, + record_lease_state, + ) +from provisioningserver.logger import get_maas_logger +from provisioningserver.rpc.exceptions import NoConnectionsAvailable +from provisioningserver.rpc.region import UpdateLeases +from provisioningserver.utils.twisted import ( + pause, + retries, + ) +from twisted.application.internet import TimerService +from twisted.internet.defer import inlineCallbacks +from twisted.internet.threads import deferToThread +from twisted.python import log + + +maaslog = get_maas_logger("lease_upload_service") + + +def convert_mappings_to_leases(mappings): + """Convert AMP mappings to record_lease_state() leases. + + Take mappings, as used by UpdateLeases, and turn into leases + as used by record_lease_state(). 
+ """ + return { + mapping["ip"]: mapping["mac"] + for mapping in mappings + } + + +def convert_leases_to_mappings(leases): + """Convert record_lease_state() leases into UpdateLeases mappings. + + Take the leases dict, as returned by record_lease_state(), and + turn it into a mappings list suitable for transportation in + the UpdateLeases AMP command. + """ + return [ + {"ip": ip, "mac": leases[ip]} + for ip in leases + ] + + +class LeaseUploadService(TimerService, object): + """Twisted service to periodically upload DHCP leases to the region. + + :param client_service: A `ClusterClientService` instance for talking + to the region controller. + :param reactor: An `IReactor` instance. + """ + + check_interval = 60 # In seconds. + + def __init__(self, client_service, reactor, cluster_uuid): + # Call self.try_upload() every self.check_interval. + super(LeaseUploadService, self).__init__( + self.check_interval, self.try_upload) + self.clock = reactor + self.client_service = client_service + self.uuid = cluster_uuid + maaslog.info("LeaseUploadService starting.") + + def try_upload(self): + """Wrap upload attempts in something that catches Failures. + + Log the full error to the Twisted log, and a concise error to + the maas log. + """ + def upload_failure(failure): + log.err(failure) + maaslog.error( + "Failed to upload leases: %s", failure.getErrorMessage()) + + return self._get_client_and_start_upload().addErrback(upload_failure) + + @inlineCallbacks + def _get_client_and_start_upload(self): + # Retry a few times, since this service usually comes up before + # the RPC service. 
+ for elapsed, remaining, wait in retries(15, 5, self.clock): + try: + client = self.client_service.getClient() + break + except NoConnectionsAvailable: + yield pause(wait, clock=self.clock) + else: + maaslog.error( + "Failed to connect to region controller, cannot upload leases") + return + yield self._start_upload(client) + + @inlineCallbacks + def _start_upload(self, client): + maaslog.debug("Scanning DHCP leases...") + updated_lease_info = yield deferToThread(check_lease_changes) + if updated_lease_info is None: + maaslog.debug("No leases changed since last scan") + else: + timestamp, leases = updated_lease_info + record_lease_state(timestamp, leases) + mappings = convert_leases_to_mappings(leases) + maaslog.info( + "Uploading %d DHCP leases to region controller.", + len(mappings)) + yield client( + UpdateLeases, uuid=self.uuid, mappings=mappings) + maaslog.debug("Lease upload complete.") diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/node_power_monitor_service.py maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/node_power_monitor_service.py --- maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/node_power_monitor_service.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/node_power_monitor_service.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,98 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Service to periodically query the power state on this cluster's nodes.""" + + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "NodePowerMonitorService" +] + +from datetime import timedelta + +from provisioningserver.logger.log import get_maas_logger +from provisioningserver.rpc import getRegionClient +from provisioningserver.rpc.exceptions import ( + NoConnectionsAvailable, + NoSuchCluster, + ) +from provisioningserver.rpc.power import query_all_nodes +from provisioningserver.rpc.region import ListNodePowerParameters +from provisioningserver.utils.twisted import ( + pause, + retries, + ) +from twisted.application.internet import TimerService +from twisted.internet.defer import inlineCallbacks +from twisted.python import log + + +maaslog = get_maas_logger("power_monitor_service") + + +class NodePowerMonitorService(TimerService, object): + """Service to monitor the power status of all nodes in this cluster.""" + + check_interval = timedelta(minutes=5).total_seconds() + max_nodes_at_once = 5 + + def __init__(self, cluster_uuid, clock=None): + # Call self.query_nodes() every self.check_interval. + super(NodePowerMonitorService, self).__init__( + self.check_interval, self.try_query_nodes, cluster_uuid) + self.clock = clock + + def try_query_nodes(self, uuid): + """Attempt to query nodes' power states. + + Log errors on failure, but do not propagate them up; that will + stop the timed loop from running. + """ + def query_nodes_failed(failure): + # Log the error in full to the Twisted log. + log.err(failure) + # Log something concise to the MAAS log. + maaslog.error( + "Failed to query nodes' power status: %s", + failure.getErrorMessage()) + + return self.query_nodes(uuid).addErrback(query_nodes_failed) + + @inlineCallbacks + def query_nodes(self, uuid): + # Retry a few times, since this service usually comes up before + # the RPC service. 
+ for elapsed, remaining, wait in retries(15, 5, self.clock): + try: + client = getRegionClient() + except NoConnectionsAvailable: + yield pause(wait, self.clock) + else: + break + else: + maaslog.error( + "Cannot monitor nodes' power status; " + "region not available.") + return + + # Get the nodes' power parameters from the region. + try: + response = yield client(ListNodePowerParameters, uuid=uuid) + except NoSuchCluster: + maaslog.error( + "This cluster (%s) is not recognised by the region.", + uuid) + else: + node_power_parameters = response['nodes'] + yield query_all_nodes( + node_power_parameters, + max_concurrency=self.max_nodes_at_once, clock=self.clock) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/tests/test_dhcp_probe_service.py maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/tests/test_dhcp_probe_service.py --- maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/tests/test_dhcp_probe_service.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/tests/test_dhcp_probe_service.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,247 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for periodic DHCP prober.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + + +from maastesting.factory import factory +from maastesting.matchers import ( + get_mock_calls, + HasLength, + MockCalledOnceWith, + MockNotCalled, + ) +from maastesting.testcase import MAASTwistedRunTest +from mock import ( + Mock, + sentinel, + ) +from provisioningserver.pserv_services import dhcp_probe_service +from provisioningserver.pserv_services.dhcp_probe_service import ( + DHCPProbeService, + ) +from provisioningserver.rpc import ( + getRegionClient, + region, + ) +from provisioningserver.rpc.testing import MockLiveClusterToRegionRPCFixture +from provisioningserver.testing.testcase import PservTestCase +from twisted.internet import defer +from twisted.internet.task import Clock + + +class TestDHCPProbeService(PservTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def setUp(self): + super(TestDHCPProbeService, self).setUp() + self.cluster_uuid = factory.make_UUID() + + def patch_rpc_methods(self): + fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) + protocol, connecting = fixture.makeEventLoop( + region.GetClusterInterfaces, region.ReportForeignDHCPServer) + return protocol, connecting + + def make_cluster_interface_values(self, ip=None): + """Return a dict describing a cluster interface.""" + if ip is None: + ip = factory.make_ipv4_address() + return { + 'name': factory.make_name('interface'), + 'interface': factory.make_name('eth'), + 'ip': ip, + } + + def test_is_called_every_interval(self): + clock = Clock() + service = DHCPProbeService( + sentinel.service, clock, self.cluster_uuid) + + # Avoid actually probing + probe_dhcp = self.patch(service, 'probe_dhcp') + + # Until the service has started, periodicprobe_dhcp() won't + # be called. + self.assertThat(probe_dhcp, MockNotCalled()) + + # The first call is issued at startup. 
+ service.startService() + self.assertThat(probe_dhcp, MockCalledOnceWith()) + + # Wind clock forward one second less than the desired interval. + clock.advance(service.check_interval - 1) + + # No more periodic calls made. + self.assertEqual(1, len(get_mock_calls(probe_dhcp))) + + # Wind clock forward one second, past the interval. + clock.advance(1) + + # Now there were two calls. + self.assertThat(get_mock_calls(probe_dhcp), HasLength(2)) + + def test_probe_is_initiated_in_new_thread(self): + clock = Clock() + interface = self.make_cluster_interface_values() + rpc_service = Mock() + rpc_client = rpc_service.getClient.return_value + rpc_client.side_effect = [ + defer.succeed(dict(interfaces=[interface])), + ] + + # We could patch out 'periodic_probe_task' instead here but this + # is better because: + # 1. The former requires spinning the reactor again before being + # able to test the result. + # 2. This way there's no thread to clean up after the test. + deferToThread = self.patch(dhcp_probe_service, 'deferToThread') + deferToThread.return_value = defer.succeed(None) + service = DHCPProbeService( + rpc_service, clock, self.cluster_uuid) + service.startService() + self.assertThat( + deferToThread, MockCalledOnceWith( + dhcp_probe_service.probe_interface, + interface['interface'], interface['ip'])) + + @defer.inlineCallbacks + def test_exits_gracefully_if_cant_get_interfaces(self): + clock = Clock() + maaslog = self.patch(dhcp_probe_service, 'maaslog') + + protocol, connecting = self.patch_rpc_methods() + self.addCleanup((yield connecting)) + + del protocol._commandDispatch[ + region.GetClusterInterfaces.commandName] + rpc_service = Mock() + rpc_service.getClient.return_value = getRegionClient() + service = DHCPProbeService( + rpc_service, clock, self.cluster_uuid) + yield service.startService() + yield service.stopService() + + self.assertThat( + maaslog.error, MockCalledOnceWith( + "Unable to query region for interfaces: Region does not " + "support the 
GetClusterInterfaces RPC method.")) + + @defer.inlineCallbacks + def test_exits_gracefully_if_cant_report_foreign_dhcp_server(self): + clock = Clock() + maaslog = self.patch(dhcp_probe_service, 'maaslog') + deferToThread = self.patch( + dhcp_probe_service, 'deferToThread') + deferToThread.return_value = defer.succeed(['192.168.0.100']) + protocol, connecting = self.patch_rpc_methods() + self.addCleanup((yield connecting)) + + del protocol._commandDispatch[ + region.ReportForeignDHCPServer.commandName] + protocol.GetClusterInterfaces.return_value = { + 'interfaces': [ + self.make_cluster_interface_values(ip='192.168.0.1'), + ], + } + + rpc_service = Mock() + rpc_service.getClient.return_value = getRegionClient() + service = DHCPProbeService( + rpc_service, clock, self.cluster_uuid) + yield service.startService() + yield service.stopService() + + self.assertThat( + maaslog.error, MockCalledOnceWith( + "Unable to inform region of rogue DHCP server: the region " + "does not yet support the ReportForeignDHCPServer RPC " + "method.")) + + def test_logs_errors(self): + clock = Clock() + maaslog = self.patch(dhcp_probe_service, 'maaslog') + service = DHCPProbeService( + sentinel.service, clock, self.cluster_uuid) + error_message = factory.make_string() + self.patch(service, 'probe_dhcp').side_effect = Exception( + error_message) + service.startService() + self.assertThat( + maaslog.error, MockCalledOnceWith( + "Unable to probe for rogue DHCP servers: %s", + error_message)) + + @defer.inlineCallbacks + def test_reports_foreign_dhcp_servers_to_region(self): + clock = Clock() + protocol, connecting = self.patch_rpc_methods() + self.addCleanup((yield connecting)) + + deferToThread = self.patch( + dhcp_probe_service, 'deferToThread') + foreign_dhcp_ip = factory.make_ipv4_address() + deferToThread.return_value = defer.succeed( + [foreign_dhcp_ip]) + + interface = self.make_cluster_interface_values() + protocol.GetClusterInterfaces.return_value = { + 'interfaces': [interface], + 
} + + rpc_service = Mock() + rpc_service.getClient.return_value = getRegionClient() + service = DHCPProbeService( + rpc_service, clock, self.cluster_uuid) + yield service.startService() + yield service.stopService() + + self.assertThat( + protocol.ReportForeignDHCPServer, + MockCalledOnceWith( + protocol, + cluster_uuid=self.cluster_uuid, + interface_name=interface['name'], + foreign_dhcp_ip=foreign_dhcp_ip)) + + @defer.inlineCallbacks + def test_reports_lack_of_foreign_dhcp_servers_to_region(self): + clock = Clock() + protocol, connecting = self.patch_rpc_methods() + self.addCleanup((yield connecting)) + + deferToThread = self.patch( + dhcp_probe_service, 'deferToThread') + deferToThread.return_value = defer.succeed([]) + + interface = self.make_cluster_interface_values() + protocol.GetClusterInterfaces.return_value = { + 'interfaces': [interface], + } + + rpc_service = Mock() + rpc_service.getClient.return_value = getRegionClient() + service = DHCPProbeService( + rpc_service, clock, self.cluster_uuid) + yield service.startService() + yield service.stopService() + + self.assertThat( + protocol.ReportForeignDHCPServer, + MockCalledOnceWith( + protocol, + cluster_uuid=self.cluster_uuid, + interface_name=interface['name'], + foreign_dhcp_ip=None)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/tests/test_image_download_service.py maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/tests/test_image_download_service.py --- maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/tests/test_image_download_service.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/tests/test_image_download_service.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,273 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for provisioningserver.pserv_services.image_download_service""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from datetime import timedelta +from urlparse import urlparse + +from fixtures import FakeLogger +from maastesting.factory import factory +from maastesting.matchers import ( + get_mock_calls, + MockCalledOnceWith, + MockCallsMatch, + MockNotCalled, + ) +from maastesting.testcase import MAASTwistedRunTest +from mock import ( + call, + Mock, + sentinel, + ) +from provisioningserver.boot import tftppath +from provisioningserver.pserv_services.image_download_service import ( + ImageDownloadService, + ) +from provisioningserver.rpc import boot_images +from provisioningserver.rpc.boot_images import _run_import +from provisioningserver.rpc.exceptions import NoConnectionsAvailable +from provisioningserver.rpc.region import ( + GetBootSources, + GetBootSourcesV2, + ) +from provisioningserver.rpc.testing import TwistedLoggerFixture +from provisioningserver.testing.testcase import PservTestCase +from testtools.deferredruntest import extract_result +from twisted.application.internet import TimerService +from twisted.internet import defer +from twisted.internet.task import Clock +from twisted.spread.pb import NoSuchMethod + + +class TestPeriodicImageDownloadService(PservTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test_init(self): + service = ImageDownloadService( + sentinel.service, sentinel.clock, sentinel.uuid) + self.assertIsInstance(service, TimerService) + self.assertIs(service.clock, sentinel.clock) + self.assertIs(service.uuid, sentinel.uuid) + self.assertIs(service.client_service, sentinel.service) + + def patch_download(self, service, return_value): + patched = self.patch(service, '_start_download') + patched.return_value = defer.succeed(return_value) + return patched + + def test_is_called_every_interval(self): + 
clock = Clock() + service = ImageDownloadService( + sentinel.service, clock, sentinel.uuid) + # Avoid actual downloads: + self.patch_download(service, None) + maas_meta_last_modified = self.patch( + tftppath, 'maas_meta_last_modified') + maas_meta_last_modified.return_value = None + service.startService() + + # The first call is issued at startup. + self.assertEqual(1, len(get_mock_calls(maas_meta_last_modified))) + + # Wind clock forward one second less than the desired interval. + clock.advance(service.check_interval - 1) + # No more periodic calls made. + self.assertEqual(1, len(get_mock_calls(maas_meta_last_modified))) + + # Wind clock forward one second, past the interval. + clock.advance(1) + + # Now there were two calls. + self.assertEqual(2, len(get_mock_calls(maas_meta_last_modified))) + + # Forward another interval, should be three calls. + clock.advance(service.check_interval) + self.assertEqual(3, len(get_mock_calls(maas_meta_last_modified))) + + def test_initiates_download_if_no_meta_file(self): + clock = Clock() + service = ImageDownloadService( + sentinel.service, clock, sentinel.uuid) + _start_download = self.patch_download(service, None) + self.patch( + tftppath, + 'maas_meta_last_modified').return_value = None + service.startService() + self.assertThat(_start_download, MockCalledOnceWith()) + + def test_initiates_download_if_15_minutes_has_passed(self): + clock = Clock() + service = ImageDownloadService( + sentinel.service, clock, sentinel.uuid) + _start_download = self.patch_download(service, None) + one_week_ago = clock.seconds() - timedelta(minutes=15).total_seconds() + self.patch( + tftppath, + 'maas_meta_last_modified').return_value = one_week_ago + service.startService() + self.assertThat(_start_download, MockCalledOnceWith()) + + def test_no_download_if_15_minutes_has_not_passed(self): + clock = Clock() + service = ImageDownloadService( + sentinel.service, clock, sentinel.uuid) + _start_download = self.patch_download(service, None) + 
one_week = timedelta(minutes=15).total_seconds() + self.patch( + tftppath, + 'maas_meta_last_modified').return_value = clock.seconds() + clock.advance(one_week - 1) + service.startService() + self.assertThat(_start_download, MockNotCalled()) + + def test_download_is_initiated_in_new_thread(self): + clock = Clock() + maas_meta_last_modified = self.patch( + tftppath, 'maas_meta_last_modified') + one_week = timedelta(minutes=15).total_seconds() + maas_meta_last_modified.return_value = clock.seconds() - one_week + http_proxy = factory.make_simple_http_url() + https_proxy = factory.make_simple_http_url() + rpc_client = Mock() + client_call = Mock() + client_call.side_effect = [ + defer.succeed(dict(sources=sentinel.sources)), + defer.succeed(dict( + http=urlparse(http_proxy), + https=urlparse(https_proxy))), + ] + rpc_client.getClient.return_value = client_call + + # We could patch out 'import_boot_images' instead here but I + # don't do that for 2 reasons: + # 1. It requires spinning the reactor again before being able to + # test the result. + # 2. It means there's no thread to clean up after the test. 
+ deferToThread = self.patch(boot_images, 'deferToThread') + deferToThread.return_value = defer.succeed(None) + service = ImageDownloadService( + rpc_client, clock, sentinel.uuid) + service.startService() + self.assertThat( + deferToThread, MockCalledOnceWith( + _run_import, sentinel.sources, http_proxy=http_proxy, + https_proxy=https_proxy)) + + def test_no_download_if_no_rpc_connections(self): + rpc_client = Mock() + failure = NoConnectionsAvailable() + rpc_client.getClient.side_effect = failure + + deferToThread = self.patch(boot_images, 'deferToThread') + service = ImageDownloadService( + rpc_client, Clock(), sentinel.uuid) + service.startService() + self.assertThat(deferToThread, MockNotCalled()) + + def test_logs_other_errors(self): + service = ImageDownloadService( + sentinel.rpc, Clock(), sentinel.uuid) + + maybe_start_download = self.patch(service, "maybe_start_download") + maybe_start_download.return_value = defer.fail( + ZeroDivisionError("Such a shame I can't divide by zero")) + + with FakeLogger("maas") as maaslog, TwistedLoggerFixture(): + d = service.try_download() + + self.assertEqual(None, extract_result(d)) + self.assertDocTestMatches( + "Failed to download images: " + "Such a shame I can't divide by zero", + maaslog.output) + + +class TestGetBootSources(PservTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + @defer.inlineCallbacks + def test__get_boot_sources_calls_get_boot_sources_v2_before_v1(self): + clock = Clock() + client_call = Mock() + client_call.side_effect = [ + defer.succeed(dict(sources=sentinel.sources)), + ] + + service = ImageDownloadService( + sentinel.rpc, clock, sentinel.uuid) + sources = yield service._get_boot_sources(client_call) + self.assertEqual(sources.get('sources'), sentinel.sources) + self.assertThat( + client_call, + MockCalledOnceWith(GetBootSourcesV2, uuid=sentinel.uuid)) + + @defer.inlineCallbacks + def test__get_boot_sources_calls_get_boot_sources_v1_on_v2_missing(self): + clock = 
Clock() + client_call = Mock() + client_call.side_effect = [ + defer.fail(NoSuchMethod()), + defer.succeed(dict(sources=[])), + ] + + service = ImageDownloadService( + sentinel.rpc, clock, sentinel.uuid) + yield service._get_boot_sources(client_call) + self.assertThat( + client_call, + MockCallsMatch( + call(GetBootSourcesV2, uuid=sentinel.uuid), + call(GetBootSources, uuid=sentinel.uuid))) + + @defer.inlineCallbacks + def test__get_boot_sources_v1_sets_os_to_wildcard(self): + sources = [ + { + 'path': factory.make_url(), + 'selections': [ + { + 'release': "trusty", + 'arches': ["amd64"], + 'subarches': ["generic"], + 'labels': ["release"], + }, + { + 'release': "precise", + 'arches': ["amd64"], + 'subarches': ["generic"], + 'labels': ["release"], + }, + ], + }, + ] + + clock = Clock() + client_call = Mock() + client_call.side_effect = [ + defer.fail(NoSuchMethod()), + defer.succeed(dict(sources=sources)), + ] + + service = ImageDownloadService( + sentinel.rpc, clock, sentinel.uuid) + sources = yield service._get_boot_sources(client_call) + os_selections = [ + selection.get('os') + for source in sources['sources'] + for selection in source['selections'] + ] + self.assertEqual(['*', '*'], os_selections) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/tests/test_lease_upload_service.py maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/tests/test_lease_upload_service.py --- maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/tests/test_lease_upload_service.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/tests/test_lease_upload_service.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,204 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for src/provisioningserver/pserv_services/lease_upload_service.py""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from datetime import datetime + +from fixtures import FakeLogger +from maastesting.factory import factory +from maastesting.matchers import ( + MockCalledOnceWith, + MockCallsMatch, + MockNotCalled, + ) +from mock import ( + ANY, + call, + Mock, + sentinel, + ) +from provisioningserver import services +from provisioningserver.dhcp.leases import check_lease_changes +from provisioningserver.pserv_services import lease_upload_service +from provisioningserver.pserv_services.lease_upload_service import ( + convert_leases_to_mappings, + convert_mappings_to_leases, + LeaseUploadService, + ) +from provisioningserver.rpc.exceptions import NoConnectionsAvailable +from provisioningserver.rpc.region import UpdateLeases +from provisioningserver.rpc.testing import ( + MockClusterToRegionRPCFixture, + TwistedLoggerFixture, + ) +from provisioningserver.testing.testcase import PservTestCase +from testtools.deferredruntest import extract_result +from twisted.application.internet import TimerService +from twisted.internet import defer +from twisted.internet.task import Clock + + +def make_random_lease(): + ip = factory.make_ipv4_address() + mac = factory.make_mac_address() + return {ip: mac} + + +def make_random_mapping(): + ip = factory.make_ipv4_address() + mac = factory.make_mac_address() + mapping = {"ip": ip, "mac": mac} + return mapping + + +class TestHelperFunctions(PservTestCase): + + def test_convert_leases_to_mappings_maps_correctly(self): + mappings = list() + for _ in xrange(3): + mappings.append(make_random_mapping()) + + # Convert to leases. + leases = convert_mappings_to_leases(mappings) + # Convert back and test against our original mappings. 
+ observed = convert_leases_to_mappings(leases) + self.assertItemsEqual(mappings, observed) + + def test_convert_leases_to_mappings_converts_correctly(self): + leases = dict() + for _ in xrange(3): + leases.update(make_random_lease()) + + # Convert to mappings. + mappings = convert_leases_to_mappings(leases) + # Convert back and test against our original leases. + observed = convert_mappings_to_leases(mappings) + self.assertEqual(observed, leases) + + +class TestPeriodicImageDownloadService(PservTestCase): + + def test_init(self): + service = LeaseUploadService( + sentinel.service, sentinel.clock, sentinel.uuid) + self.assertIsInstance(service, TimerService) + self.assertIs(service.clock, sentinel.clock) + self.assertIs(service.uuid, sentinel.uuid) + self.assertIs(service.client_service, sentinel.service) + + def patch_upload(self, service, return_value=None): + patched = self.patch(service, '_get_client_and_start_upload') + patched.return_value = defer.succeed(return_value) + return patched + + def test_is_called_every_interval(self): + clock = Clock() + service = LeaseUploadService( + sentinel.service, clock, sentinel.uuid) + # Avoid actual uploads: + start_upload = self.patch_upload(service) + + # There are no calls before the service is started. + self.assertThat(start_upload, MockNotCalled()) + + service.startService() + + # The first call is issued at startup. + self.assertThat(start_upload, MockCalledOnceWith()) + + # Wind clock forward one second less than the desired interval. + clock.advance(service.check_interval - 1) + # No more periodic calls made. + self.assertThat(start_upload, MockCalledOnceWith()) + + # Wind clock forward one second, past the interval. + clock.advance(1) + + # Now there were two calls. + self.assertThat(start_upload, MockCallsMatch(call(), call())) + + # Forward another interval, should be three calls. 
+ clock.advance(service.check_interval) + self.assertThat( + start_upload, MockCallsMatch(call(), call(), call())) + + def test_no_upload_if_no_rpc_connections(self): + rpc_client = Mock() + rpc_client.getClient.side_effect = NoConnectionsAvailable() + + clock = Clock() + service = LeaseUploadService( + rpc_client, clock, sentinel.uuid) + start_upload = self.patch(service, '_start_upload') + service.startService() + # Wind clock past all the retries. You can't do this in one big + # lump, it seems. The test looks like it passes, but the + # maybe_start_upload() method never returns properly. + clock.pump((5, 5, 5)) + self.assertThat(start_upload, MockNotCalled()) + + def test_upload_is_initiated(self): + # We're pretending to be the reactor in this thread. To ensure correct + # operation from things like the @asynchronous decorators we need to + # register as the IO thread. + self.register_as_io_thread() + + # Create a fixture for the region side of the RPC. + rpc_fixture = self.useFixture(MockClusterToRegionRPCFixture()) + rpc_service = services.getServiceNamed('rpc') + server, io = rpc_fixture.makeEventLoop(UpdateLeases) + server.UpdateLeases.return_value = defer.succeed({}) + + # Create a mock response to "check_lease_changes()" + fake_lease = make_random_lease() + deferToThread = self.patch(lease_upload_service, 'deferToThread') + deferToThread.return_value = defer.succeed( + (datetime.now(), fake_lease),) + mappings = convert_leases_to_mappings(fake_lease) + + # Start the service. + uuid = factory.make_UUID() + service = LeaseUploadService(rpc_service, Clock(), uuid) + service.startService() + + # Gavin says that I need to pump my IO. I don't know what this + # means but it sounds important! + io.pump() + + # Ensure it called out to a new thread to get and parse the leases. + self.assertThat(deferToThread, MockCalledOnceWith(check_lease_changes)) + + # Ensure it sent them to the region using RPC. 
+ self.assertThat( + server.UpdateLeases, + MockCalledOnceWith(ANY, uuid=uuid, mappings=mappings)) + + def test_logs_other_errors(self): + service = LeaseUploadService( + sentinel.rpc, Clock(), sentinel.uuid) + + _get_client_and_start_upload = self.patch_autospec( + service, "_get_client_and_start_upload") + _get_client_and_start_upload.return_value = defer.fail( + ZeroDivisionError("Such a shame I can't divide by zero")) + + with FakeLogger("maas") as maaslog, TwistedLoggerFixture(): + d = service.try_upload() + + self.assertEqual(None, extract_result(d)) + self.assertDocTestMatches( + "Failed to upload leases: " + "Such a shame I can't divide by zero", + maaslog.output) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/tests/test_node_power_monitor_service.py maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/tests/test_node_power_monitor_service.py --- maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/tests/test_node_power_monitor_service.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/tests/test_node_power_monitor_service.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,161 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for +:py:module:`~provisioningserver.pserv_services.node_power_monitor_service`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from fixtures import FakeLogger +from maastesting.factory import factory +from maastesting.matchers import ( + MockCalledOnceWith, + MockCallsMatch, + ) +from maastesting.testcase import ( + MAASTestCase, + MAASTwistedRunTest, + ) +from mock import ( + ANY, + call, + ) +from provisioningserver.pserv_services import ( + node_power_monitor_service as npms, + ) +from provisioningserver.rpc import ( + exceptions, + region, + ) +from provisioningserver.rpc.testing import ( + MockClusterToRegionRPCFixture, + TwistedLoggerFixture, + ) +from testtools.deferredruntest import extract_result +from testtools.matchers import MatchesStructure +from twisted.internet.defer import ( + fail, + succeed, + ) +from twisted.internet.task import Clock + + +class TestNodePowerMonitorService(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test_init_sets_up_timer_correctly(self): + cluster_uuid = factory.make_UUID() + service = npms.NodePowerMonitorService(cluster_uuid) + self.assertThat(service, MatchesStructure.byEquality( + call=(service.try_query_nodes, (cluster_uuid,), {}), + step=(5 * 60), clock=None)) + + def make_monitor_service(self): + cluster_uuid = factory.make_UUID() + service = npms.NodePowerMonitorService(cluster_uuid, Clock()) + return cluster_uuid, service + + def test_query_nodes_retries_getting_client(self): + cluster_uuid, service = self.make_monitor_service() + + getRegionClient = self.patch(npms, "getRegionClient") + getRegionClient.side_effect = exceptions.NoConnectionsAvailable + + def has_been_called_n_times(n): + calls = [call()] * n + return MockCallsMatch(*calls) + + maaslog = self.useFixture(FakeLogger("maas")) + + d = service.query_nodes(cluster_uuid) + # Immediately the first attempt to 
get a client happens. + self.assertThat(getRegionClient, has_been_called_n_times(1)) + self.assertFalse(d.called) + # Followed by 3 more attempts as time passes. + service.clock.pump((5, 5, 5)) + self.assertThat(getRegionClient, has_been_called_n_times(4)) + # query_nodes returns after 15 seconds. + self.assertTrue(d.called) + self.assertIsNone(extract_result(d)) + + # A simple message is logged, but even this may be too noisy. + self.assertIn( + "Cannot monitor nodes' power status; region not available.", + maaslog.output) + + def test_query_nodes_calls_the_region(self): + cluster_uuid, service = self.make_monitor_service() + + rpc_fixture = self.useFixture(MockClusterToRegionRPCFixture()) + client, io = rpc_fixture.makeEventLoop(region.ListNodePowerParameters) + client.ListNodePowerParameters.return_value = succeed({"nodes": []}) + + d = service.query_nodes(cluster_uuid) + io.flush() + + self.assertEqual(None, extract_result(d)) + self.assertThat( + client.ListNodePowerParameters, + MockCalledOnceWith(ANY, uuid=cluster_uuid)) + + def test_query_nodes_calls_query_all_nodes(self): + cluster_uuid, service = self.make_monitor_service() + + rpc_fixture = self.useFixture(MockClusterToRegionRPCFixture()) + client, io = rpc_fixture.makeEventLoop(region.ListNodePowerParameters) + client.ListNodePowerParameters.return_value = succeed({"nodes": []}) + + query_all_nodes = self.patch(npms, "query_all_nodes") + + d = service.query_nodes(cluster_uuid) + io.flush() + + self.assertEqual(None, extract_result(d)) + self.assertThat( + query_all_nodes, + MockCalledOnceWith( + [], max_concurrency=service.max_nodes_at_once, + clock=service.clock)) + + def test_query_nodes_copes_with_NoSuchCluster(self): + cluster_uuid, service = self.make_monitor_service() + + rpc_fixture = self.useFixture(MockClusterToRegionRPCFixture()) + client, io = rpc_fixture.makeEventLoop(region.ListNodePowerParameters) + client.ListNodePowerParameters.return_value = fail( + 
exceptions.NoSuchCluster.from_uuid(cluster_uuid)) + + d = service.query_nodes(cluster_uuid) + with FakeLogger("maas") as maaslog: + io.flush() + + self.assertEqual(None, extract_result(d)) + self.assertDocTestMatches( + "This cluster (...) is not recognised by the region.", + maaslog.output) + + def test_try_query_nodes_logs_other_errors(self): + cluster_uuid, service = self.make_monitor_service() + + query_nodes = self.patch(service, "query_nodes") + query_nodes.return_value = fail( + ZeroDivisionError("Such a shame I can't divide by zero")) + + with FakeLogger("maas") as maaslog, TwistedLoggerFixture(): + d = service.try_query_nodes(cluster_uuid) + + self.assertEqual(None, extract_result(d)) + self.assertDocTestMatches( + "Failed to query nodes' power status: " + "Such a shame I can't divide by zero", + maaslog.output) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/tests/test_tftp.py maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/tests/test_tftp.py --- maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/tests/test_tftp.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/tests/test_tftp.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,490 @@ +# Copyright 2005-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for the maastftp Twisted plugin.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from functools import partial +import json +import os +from socket import ( + AF_INET, + AF_INET6, + ) +from urllib import urlencode +from urlparse import ( + parse_qsl, + urlparse, + ) + +from maastesting.factory import factory +from maastesting.matchers import ( + MockCalledOnceWith, + MockNotCalled, + ) +from maastesting.testcase import ( + MAASTestCase, + MAASTwistedRunTest, + ) +import mock +from mock import ( + sentinel, + ANY, + ) +from netaddr import IPNetwork +from netaddr.ip import ( + IPV4_LINK_LOCAL, + IPV6_LINK_LOCAL, + ) +from provisioningserver.boot import BytesReader +from provisioningserver.boot.pxe import PXEBootMethod +from provisioningserver.boot.tests.test_pxe import compose_config_path +from provisioningserver.events import EVENT_TYPES +from provisioningserver.pserv_services import tftp as tftp_module +from provisioningserver.pserv_services.tftp import ( + Port, + TFTPBackend, + TFTPService, + UDPServer, + ) +from provisioningserver.tests.test_kernel_opts import make_kernel_parameters +from testtools.matchers import ( + AfterPreprocessing, + AllMatch, + Equals, + IsInstance, + MatchesAll, + MatchesStructure, + ) +from tftp.backend import IReader +from tftp.protocol import TFTP +from twisted.application import internet +from twisted.application.service import MultiService +from twisted.internet import reactor +from twisted.internet.address import ( + IPv4Address, + IPv6Address, + ) +from twisted.internet.defer import ( + inlineCallbacks, + succeed, + ) +from twisted.internet.protocol import Protocol +from twisted.python import context +from zope.interface.verify import verifyObject + + +class TestBytesReader(MAASTestCase): + """Tests for `provisioningserver.tftp.BytesReader`.""" + + def test_interfaces(self): + reader = BytesReader(b"") + 
self.addCleanup(reader.finish) + verifyObject(IReader, reader) + + def test_read(self): + data = factory.make_string(size=10).encode("ascii") + reader = BytesReader(data) + self.addCleanup(reader.finish) + self.assertEqual(data[:7], reader.read(7)) + self.assertEqual(data[7:], reader.read(7)) + self.assertEqual(b"", reader.read(7)) + + def test_finish(self): + reader = BytesReader(b"1234") + reader.finish() + self.assertRaises(ValueError, reader.read, 1) + + +class TestTFTPBackend(MAASTestCase): + """Tests for `provisioningserver.tftp.TFTPBackend`.""" + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test_init(self): + temp_dir = self.make_dir() + generator_url = "http://%s.example.com/%s" % ( + factory.make_name("domain"), factory.make_name("path")) + backend = TFTPBackend(temp_dir, generator_url) + self.assertEqual((True, False), (backend.can_read, backend.can_write)) + self.assertEqual(temp_dir, backend.base.path) + self.assertEqual(generator_url, backend.generator_url.geturl()) + + def test_get_generator_url(self): + # get_generator_url() merges the parameters obtained from the request + # file path (arch, subarch, name) into the configured generator URL. + mac = factory.make_mac_address("-") + dummy = factory.make_name("dummy").encode("ascii") + backend_url = b"http://example.com/?" + urlencode({b"dummy": dummy}) + backend = TFTPBackend(self.make_dir(), backend_url) + # params is an example of the parameters obtained from a request. 
+ params = {"mac": mac} + generator_url = urlparse(backend.get_generator_url(params)) + self.assertEqual("example.com", generator_url.hostname) + query = parse_qsl(generator_url.query) + query_expected = [ + ("dummy", dummy), + ("mac", mac), + ] + self.assertItemsEqual(query_expected, query) + + def get_reader(self, data): + temp_file = self.make_file(name="example", contents=data) + temp_dir = os.path.dirname(temp_file) + backend = TFTPBackend(temp_dir, "http://nowhere.example.com/") + return backend.get_reader("example") + + @inlineCallbacks + def test_get_reader_regular_file(self): + # TFTPBackend.get_reader() returns a regular FilesystemReader for + # paths not matching re_config_file. + self.patch(tftp_module, 'send_event_node_mac_address') + self.patch(tftp_module, 'get_remote_mac') + data = factory.make_string().encode("ascii") + reader = yield self.get_reader(data) + self.addCleanup(reader.finish) + self.assertEqual(len(data), reader.size) + self.assertEqual(data, reader.read(len(data))) + self.assertEqual(b"", reader.read(1)) + + @inlineCallbacks + def test_get_reader_handles_backslashes_in_path(self): + self.patch(tftp_module, 'send_event_node_mac_address') + self.patch(tftp_module, 'get_remote_mac') + + data = factory.make_string().encode("ascii") + temp_dir = self.make_dir() + subdir = factory.make_name('subdir') + filename = factory.make_name('file') + os.mkdir(os.path.join(temp_dir, subdir)) + factory.make_file(os.path.join(temp_dir, subdir), filename, data) + + path = '\\%s\\%s' % (subdir, filename) + backend = TFTPBackend(temp_dir, "http://nowhere.example.com/") + reader = yield backend.get_reader(path) + + self.addCleanup(reader.finish) + self.assertEqual(len(data), reader.size) + self.assertEqual(data, reader.read(len(data))) + self.assertEqual(b"", reader.read(1)) + + @inlineCallbacks + def test_get_reader_logs_node_event_with_mac_address(self): + mac_address = factory.make_mac_address() + self.patch_autospec(tftp_module, 
'send_event_node_mac_address') + self.patch(tftp_module, 'get_remote_mac').return_value = mac_address + data = factory.make_string().encode("ascii") + reader = yield self.get_reader(data) + self.addCleanup(reader.finish) + self.assertThat( + tftp_module.send_event_node_mac_address, + MockCalledOnceWith( + event_type=EVENT_TYPES.NODE_TFTP_REQUEST, + mac_address=mac_address, description=ANY)) + + @inlineCallbacks + def test_get_reader_does_not_log_when_mac_cannot_be_found(self): + self.patch_autospec(tftp_module, 'send_event_node_mac_address') + self.patch(tftp_module, 'get_remote_mac').return_value = None + data = factory.make_string().encode("ascii") + reader = yield self.get_reader(data) + self.addCleanup(reader.finish) + self.assertThat( + tftp_module.send_event_node_mac_address, + MockNotCalled()) + + @inlineCallbacks + def test_get_render_file(self): + # For paths matching PXEBootMethod.match_path, TFTPBackend.get_reader() + # returns a Deferred that will yield a BytesReader. + cluster_uuid = factory.make_UUID() + self.patch(tftp_module, 'get_cluster_uuid').return_value = ( + cluster_uuid) + self.patch(tftp_module, 'send_event_node_mac_address') + mac = factory.make_mac_address("-") + config_path = compose_config_path(mac) + backend = TFTPBackend(self.make_dir(), b"http://example.com/") + # python-tx-tftp sets up call context so that backends can discover + # more about the environment in which they're running. 
+ call_context = { + "local": ( + factory.make_ipv4_address(), + factory.pick_port()), + "remote": ( + factory.make_ipv4_address(), + factory.pick_port()), + } + + @partial(self.patch, backend, "get_boot_method_reader") + def get_boot_method_reader(boot_method, params): + params_json = json.dumps(params) + params_json_reader = BytesReader(params_json) + return succeed(params_json_reader) + + reader = yield context.call( + call_context, backend.get_reader, config_path) + output = reader.read(10000) + # The addresses provided by python-tx-tftp in the call context are + # passed over the wire as address:port strings. + expected_params = { + "mac": mac, + "local": call_context["local"][0], # address only. + "remote": call_context["remote"][0], # address only. + "cluster_uuid": cluster_uuid, + } + observed_params = json.loads(output) + self.assertEqual(expected_params, observed_params) + + @inlineCallbacks + def test_get_boot_method_reader_returns_rendered_params(self): + # get_boot_method_reader() takes a dict() of parameters and returns an + # `IReader` of a PXE configuration, rendered by + # `PXEBootMethod.get_reader`. + backend = TFTPBackend(self.make_dir(), b"http://example.com/") + # Fake configuration parameters, as discovered from the file path. + fake_params = {"mac": factory.make_mac_address("-")} + # Fake kernel configuration parameters, as returned from the API call. + fake_kernel_params = make_kernel_parameters() + + # Stub get_page to return the fake API configuration parameters. + fake_get_page_result = json.dumps(fake_kernel_params._asdict()) + get_page_patch = self.patch(backend, "get_page") + get_page_patch.return_value = succeed(fake_get_page_result) + + # Stub get_reader to return the render parameters. 
+ method = PXEBootMethod() + fake_render_result = factory.make_name("render").encode("utf-8") + render_patch = self.patch(method, "get_reader") + render_patch.return_value = BytesReader(fake_render_result) + + # Get the rendered configuration, which will actually be a JSON dump + # of the render-time parameters. + reader = yield backend.get_boot_method_reader(method, fake_params) + self.addCleanup(reader.finish) + self.assertIsInstance(reader, BytesReader) + output = reader.read(10000) + + # The kernel parameters were fetched using `backend.get_page`. + self.assertThat(backend.get_page, MockCalledOnceWith(mock.ANY)) + + # The result has been rendered by `method.get_reader`. + self.assertEqual(fake_render_result.encode("utf-8"), output) + self.assertThat(method.get_reader, MockCalledOnceWith( + backend, kernel_params=fake_kernel_params, **fake_params)) + + @inlineCallbacks + def test_get_boot_method_render_substitutes_armhf_in_params(self): + # get_config_reader() should substitute "arm" for "armhf" in the + # arch field of the parameters (mapping from pxe to maas + # namespace). + cluster_uuid = factory.make_UUID() + self.patch(tftp_module, 'get_cluster_uuid').return_value = ( + cluster_uuid) + self.patch(tftp_module, 'send_event_node_mac_address') + config_path = "pxelinux.cfg/default-arm" + backend = TFTPBackend(self.make_dir(), b"http://example.com/") + # python-tx-tftp sets up call context so that backends can discover + # more about the environment in which they're running. 
+ call_context = { + "local": ( + factory.make_ipv4_address(), + factory.pick_port()), + "remote": ( + factory.make_ipv4_address(), + factory.pick_port()), + } + + @partial(self.patch, backend, "get_boot_method_reader") + def get_boot_method_reader(boot_method, params): + params_json = json.dumps(params) + params_json_reader = BytesReader(params_json) + return succeed(params_json_reader) + + reader = yield context.call( + call_context, backend.get_reader, config_path) + output = reader.read(10000) + observed_params = json.loads(output) + self.assertEqual("armhf", observed_params["arch"]) + + +class TestTFTPService(MAASTestCase): + + def test_tftp_service(self): + # A TFTP service is configured and added to the top-level service. + interfaces = [ + factory.make_ipv4_address(), + factory.make_ipv6_address(), + ] + self.patch( + tftp_module, "get_all_interface_addresses", + lambda: interfaces) + example_root = self.make_dir() + example_generator = "http://example.com/generator" + example_port = factory.pick_port() + tftp_service = TFTPService( + resource_root=example_root, generator=example_generator, + port=example_port) + tftp_service.updateServers() + # The "tftp" service is a multi-service containing UDP servers for + # each interface defined by get_all_interface_addresses(). + self.assertIsInstance(tftp_service, MultiService) + # There's also a TimerService that updates the servers every 45s. 
+ self.assertThat( + tftp_service.refresher, MatchesStructure.byEquality( + step=45, parent=tftp_service, name="refresher", + call=(tftp_service.updateServers, (), {}), + )) + expected_backend = MatchesAll( + IsInstance(TFTPBackend), + AfterPreprocessing( + lambda backend: backend.base.path, + Equals(example_root)), + AfterPreprocessing( + lambda backend: backend.generator_url.geturl(), + Equals(example_generator))) + expected_protocol = MatchesAll( + IsInstance(TFTP), + AfterPreprocessing( + lambda protocol: protocol.backend, + expected_backend)) + expected_server = MatchesAll( + IsInstance(internet.UDPServer), + AfterPreprocessing( + lambda service: len(service.args), + Equals(2)), + AfterPreprocessing( + lambda service: service.args[0], # port + Equals(example_port)), + AfterPreprocessing( + lambda service: service.args[1], # protocol + expected_protocol)) + self.assertThat( + tftp_service.getServers(), + AllMatch(expected_server)) + # Only the interface used for each service differs. + self.assertItemsEqual( + [svc.kwargs for svc in tftp_service.getServers()], + [{"interface": interface} for interface in interfaces]) + + def test_tftp_service_rebinds_on_HUP(self): + # Initial set of interfaces to bind to. + interfaces = {"1.1.1.1", "2.2.2.2"} + self.patch( + tftp_module, "get_all_interface_addresses", + lambda: interfaces) + + tftp_service = TFTPService( + resource_root=self.make_dir(), generator="http://mighty/wind", + port=factory.pick_port()) + tftp_service.updateServers() + + # The child services of tftp_services are named after the + # interface they bind to. + self.assertEqual(interfaces, { + server.name for server in tftp_service.getServers() + }) + + # Update the set of interfaces to bind to. + interfaces.add("3.3.3.3") + interfaces.remove("1.1.1.1") + + # Ask the TFTP service to update its set of servers. 
+ tftp_service.updateServers() + + # We're in the reactor thread but we want to move the reactor + # forwards, hence we need to get all explicit about it. + reactor.runUntilCurrent() + + # The interfaces now bound match the updated interfaces set. + self.assertEqual(interfaces, { + server.name for server in tftp_service.getServers() + }) + + def test_tftp_service_does_not_bind_to_link_local_addresses(self): + # Initial set of interfaces to bind to. + ipv4_test_net_3 = IPNetwork("203.0.113.0/24") # RFC 5737 + normal_addresses = { + factory.pick_ip_in_network(ipv4_test_net_3), + factory.make_ipv6_address(), + } + link_local_addresses = { + factory.pick_ip_in_network(IPV4_LINK_LOCAL), + factory.pick_ip_in_network(IPV6_LINK_LOCAL), + } + self.patch( + tftp_module, "get_all_interface_addresses", + lambda: normal_addresses | link_local_addresses) + + tftp_service = TFTPService( + resource_root=self.make_dir(), generator="http://mighty/wind", + port=factory.pick_port()) + tftp_service.updateServers() + + # Only the "normal" addresses have been used. 
+ self.assertEqual(normal_addresses, { + server.name for server in tftp_service.getServers() + }) + + +class DummyProtocol(Protocol): + def doStop(self): + pass + + +class TestPort(MAASTestCase): + """Tests for :py:class:`Port`.""" + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test_getHost_works_with_IPv4_address(self): + port = Port(0, DummyProtocol(), "127.0.0.1") + port.addressFamily = AF_INET + port.startListening() + self.addCleanup(port.stopListening) + self.assertEqual( + IPv4Address('UDP', '127.0.0.1', port._realPortNumber), + port.getHost()) + + def test_getHost_works_with_IPv6_address(self): + port = Port(0, DummyProtocol(), "::1") + port.addressFamily = AF_INET6 + port.startListening() + self.addCleanup(port.stopListening) + self.assertEqual( + IPv6Address('UDP', '::1', port._realPortNumber), + port.getHost()) + + +class TestUDPServer(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test__getPort_calls__listenUDP_with_args_from_constructor(self): + server = UDPServer(sentinel.foo, bar=sentinel.bar) + _listenUDP = self.patch(server, "_listenUDP") + _listenUDP.return_value = sentinel.port + self.assertEqual(sentinel.port, server._getPort()) + self.assertThat(_listenUDP, MockCalledOnceWith( + sentinel.foo, bar=sentinel.bar)) + + def test__listenUDP_with_IPv4_address(self): + server = UDPServer(0, DummyProtocol(), "127.0.0.1") + port = server._getPort() + self.addCleanup(port.stopListening) + self.assertEqual(AF_INET, port.addressFamily) + + def test__listenUDP_with_IPv6_address(self): + server = UDPServer(0, DummyProtocol(), "::1") + port = server._getPort() + self.addCleanup(port.stopListening) + self.assertEqual(AF_INET6, port.addressFamily) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/tftp.py maas-1.7.6+bzr3376/src/provisioningserver/pserv_services/tftp.py --- maas-1.5.4+bzr2294/src/provisioningserver/pserv_services/tftp.py 1970-01-01 00:00:00.000000000 +0000 +++ 
# Copyright 2012-2014 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Twisted Application Plugin for the MAAS TFTP server."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

# NOTE(review): deliberately shadows the built-in `str`; with
# unicode_literals active this makes any accidental use of the bytes-ish
# `str` fail fast (a convention in these modules) -- confirm before removing.
str = None

__metaclass__ = type
__all__ = [
    "TFTPBackend",
    "TFTPService",
    ]

from functools import partial
import httplib
import json
from socket import (
    AF_INET,
    AF_INET6,
    )
from urllib import urlencode
from urlparse import (
    parse_qsl,
    urlparse,
    )

from netaddr import IPAddress
from provisioningserver.boot import (
    BootMethodRegistry,
    get_remote_mac,
    )
from provisioningserver.cluster_config import get_cluster_uuid
from provisioningserver.drivers import ArchitectureRegistry
from provisioningserver.events import (
    EVENT_TYPES,
    send_event_node_mac_address,
    )
from provisioningserver.kernel_opts import KernelParameters
from provisioningserver.utils.network import get_all_interface_addresses
from provisioningserver.utils.twisted import deferred
from tftp.backend import FilesystemSynchronousBackend
from tftp.errors import FileNotFound
from tftp.protocol import TFTP
from twisted.application import internet
from twisted.application.service import MultiService
from twisted.internet import udp
from twisted.internet.abstract import isIPv6Address
from twisted.internet.address import (
    IPv4Address,
    IPv6Address,
    )
from twisted.internet.defer import (
    inlineCallbacks,
    maybeDeferred,
    returnValue,
    )
from twisted.python.context import get
from twisted.web.client import getPage
import twisted.web.error


class TFTPBackend(FilesystemSynchronousBackend):
    """A partially dynamic read-only TFTP server.

    Static files such as kernels and initrds, as well as any non-MAAS files
    that the system may already be set up to serve, are served up normally.
    But PXE configurations are generated on the fly.

    When a PXE configuration file is requested, the server asynchronously
    requests the appropriate parameters from the API (at a configurable
    "generator URL") and generates a config file based on those.

    The regular expressions `re_config_file` and `re_mac_address` specify
    which files the server generates on the fly. Any other requests are
    passed on to the filesystem.

    Passing requests on to the API must be done very selectively, because
    failures cause the boot process to halt. This is why the expression for
    matching the MAC address is so narrowly defined: PXELINUX attempts to
    fetch files at many similar paths which must not be passed on.
    """

    # Hook point so tests can substitute a fake page fetcher.
    get_page = staticmethod(getPage)

    def __init__(self, base_path, generator_url):
        """
        :param base_path: The root directory for this TFTP server.
        :param generator_url: The URL which can be queried for the PXE
            config. See `get_generator_url` for the types of queries it is
            expected to accept.
        """
        super(TFTPBackend, self).__init__(
            base_path, can_read=True, can_write=False)
        self.generator_url = urlparse(generator_url)

    def get_generator_url(self, params):
        """Calculate the URL, including query, from which we can fetch
        additional configuration parameters.

        :param params: A dict, or iterable suitable for updating a dict, of
            additional query parameters.
        """
        query = {}
        # Merge parameters from the generator URL.
        query.update(parse_qsl(self.generator_url.query))
        # Merge parameters obtained from the request; these take precedence
        # over anything hard-wired into the generator URL.
        query.update(params)
        # Merge updated query into the generator URL.
        url = self.generator_url._replace(query=urlencode(query))
        # TODO: do something more intelligent with unicode URLs here; see
        # apiclient.utils.ascii_url() for inspiration.
        return url.geturl().encode("ascii")

    @inlineCallbacks
    def get_boot_method(self, file_name):
        """Finds the correct boot method.

        Asks each registered boot method in turn whether `file_name`
        belongs to it; yields `(method, params)` for the first match, or
        `(None, None)` when no method claims the file.
        """
        for _, method in BootMethodRegistry:
            params = yield maybeDeferred(method.match_path, self, file_name)
            if params is not None:
                returnValue((method, params))
        returnValue((None, None))

    @deferred
    def get_kernel_params(self, params):
        """Return kernel parameters obtained from the API.

        :param params: Parameters so far obtained, typically from the file
            path requested.
        :return: A `KernelParameters` instance.
        """
        url = self.get_generator_url(params)

        def reassemble(data):
            return KernelParameters(**data)

        d = self.get_page(url)
        d.addCallback(json.loads)
        d.addCallback(reassemble)
        return d

    @deferred
    def get_boot_method_reader(self, boot_method, params):
        """Return an `IReader` for a boot method.

        :param boot_method: Boot method that is generating the config
        :param params: Parameters so far obtained, typically from the file
            path requested.
        """
        def generate(kernel_params):
            return boot_method.get_reader(
                self, kernel_params=kernel_params, **params)

        d = self.get_kernel_params(params)
        d.addCallback(generate)
        return d

    @staticmethod
    def get_page_errback(failure, file_name):
        """Convert an HTTP 204 (No Content) API response into a TFTP
        `FileNotFound`; any other failure is propagated unchanged."""
        failure.trap(twisted.web.error.Error)
        # This twisted.web.error.Error.status object ends up being a
        # string for some reason, but the constants we can compare against
        # (both in httplib and twisted.web.http) are ints.
        try:
            status_int = int(failure.value.status)
        except ValueError:
            # Assume that it's some other error and propagate it
            return failure

        if status_int == httplib.NO_CONTENT:
            # Convert HTTP No Content to a TFTP file not found
            raise FileNotFound(file_name)
        else:
            # Otherwise propagate the unknown error
            return failure

    @deferred
    def handle_boot_method(self, file_name, result):
        """Complete a `get_reader` call once the boot method is known.

        Falls back to the plain filesystem backend when no boot method
        matched; otherwise augments `params` with the endpoint addresses
        and cluster UUID before asking the boot method for a reader.
        """
        boot_method, params = result
        if boot_method is None:
            return super(TFTPBackend, self).get_reader(file_name)

        # Map pxe namespace architecture names to MAAS's.
        arch = params.get("arch")
        if arch is not None:
            maasarch = ArchitectureRegistry.get_by_pxealias(arch)
            if maasarch is not None:
                params["arch"] = maasarch.name.split("/")[0]

        # Send the local and remote endpoint addresses.
        local_host, local_port = get("local", (None, None))
        params["local"] = local_host
        remote_host, remote_port = get("remote", (None, None))
        params["remote"] = remote_host
        params["cluster_uuid"] = get_cluster_uuid()
        d = self.get_boot_method_reader(boot_method, params)
        return d

    @deferred
    def get_reader(self, file_name):
        """See `IBackend.get_reader()`.

        If `file_name` matches a boot method then the response is obtained
        from that boot method. Otherwise the filesystem is used to service
        the response.
        """
        # It is possible for a client to request the file with '\' instead
        # of '/', example being 'bootx64.efi'. Convert all '\' to '/' to be
        # unix compatible.
        file_name = file_name.replace('\\', '/')
        mac_address = get_remote_mac()
        if mac_address is not None:
            # Record the request against the requesting node's event log.
            send_event_node_mac_address(
                event_type=EVENT_TYPES.NODE_TFTP_REQUEST,
                mac_address=mac_address, description=file_name)
        d = self.get_boot_method(file_name)
        d.addCallback(partial(self.handle_boot_method, file_name))
        d.addErrback(self.get_page_errback, file_name)
        return d


class Port(udp.Port):
    """A :py:class:`udp.Port` that groks IPv6."""

    # This must be set by call sites (AF_INET or AF_INET6).
    addressFamily = None

    def getHost(self):
        """See :py:meth:`twisted.internet.udp.Port.getHost`.

        Unlike the base class, returns an `IPv6Address` when the socket
        is bound to an IPv6 address.
        """
        host, port = self.socket.getsockname()[:2]
        addr_type = IPv6Address if isIPv6Address(host) else IPv4Address
        return addr_type('UDP', host, port)


class UDPServer(internet.UDPServer):
    """A :py:class:`~internet.UDPServer` that groks IPv6.

    This creates the port directly instead of using the reactor's
    ``listenUDP`` method so that we can do a switcharoo to our own
    IPv6-enabled port implementation.
    """

    def _getPort(self):
        """See :py:meth:`twisted.application.internet.UDPServer._getPort`."""
        return self._listenUDP(*self.args, **self.kwargs)

    def _listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
        """See :py:meth:`twisted.internet.reactor.listenUDP`."""
        p = Port(port, protocol, interface, maxPacketSize)
        # Select the address family from the shape of the interface
        # address; `Port` requires this before listening.
        p.addressFamily = AF_INET6 if isIPv6Address(interface) else AF_INET
        p.startListening()
        return p


class TFTPService(MultiService, object):
    """An umbrella service representing a set of running TFTP servers.

    Creates a UDP server individually for each discovered network
    interface, so that we can detect the interface via which we have
    received a datagram.

    It then periodically updates the servers running in case there's a
    change to the host machine's network configuration.

    :ivar backend: The :class:`TFTPBackend` being used to service TFTP
        requests.

    :ivar port: The port on which each server is started.

    :ivar refresher: A :class:`TimerService` that calls
        ``updateServers`` periodically.

    """

    def __init__(self, resource_root, port, generator):
        """
        :param resource_root: The root directory for this TFTP server.
        :param port: The port on which each server should be started.
        :param generator: The URL to be queried for PXE configuration.
            This will normally point to the `pxeconfig` endpoint on the
            region-controller API.
        """
        super(TFTPService, self).__init__()
        self.backend = TFTPBackend(resource_root, generator)
        self.port = port
        # Establish a periodic call to self.updateServers() every 45
        # seconds, so that this service eventually converges on truth.
        # TimerService ensures that a call is made to its target
        # function immediately as it's started, so there's no need to
        # call updateServers() from here.
        self.refresher = internet.TimerService(45, self.updateServers)
        self.refresher.setName("refresher")
        self.refresher.setServiceParent(self)

    def getServers(self):
        """Return a set of all configured servers.

        :rtype: :class:`set` of :class:`internet.UDPServer`
        """
        return {
            service for service in self
            if service is not self.refresher
        }

    def updateServers(self):
        """Run a server on every interface.

        For each configured network interface this will start a TFTP
        server. If called later it will bring up servers on newly
        configured interfaces and bring down servers on deconfigured
        interfaces.
        """
        addrs_established = set(service.name for service in self.getServers())
        addrs_desired = set(get_all_interface_addresses())

        for address in addrs_desired - addrs_established:
            # Never bind to link-local addresses.
            if not IPAddress(address).is_link_local():
                tftp_service = UDPServer(
                    self.port, TFTP(self.backend), interface=address)
                tftp_service.setName(address)
                tftp_service.setServiceParent(self)

        for address in addrs_established - addrs_desired:
            tftp_service = self.getServiceNamed(address)
            tftp_service.disownServiceParent()
class ParsedURL(amp.Argument):
    """Encode a URL on the wire.

    The URL should be an instance of :py:class:`~urlparse.ParseResult`
    or :py:class:`~urlparse.SplitResult` for encoding. When decoding,
    :py:class:`~urlparse.ParseResult` is always returned.
    """

    def toString(self, inObject):
        """Encode a URL-like object into an ASCII URL.

        :raise TypeError: If `inObject` is not a URL-like object
            (meaning it doesn't have a `geturl` method).
        """
        try:
            geturl = inObject.geturl
        except AttributeError:
            raise TypeError("Not a URL-like object: %r" % (inObject,))
        # `geturl` was resolved successfully; call it outside the
        # try-block so its own failures propagate untouched.
        url = geturl()
        return ascii_url(url)

    def fromString(self, inString):
        """Decode an ASCII URL into a URL-like object.

        :return: :py:class:`~urlparse.ParseResult`
        """
        return urlparse.urlparse(inString)
+ """ + + def toStringProto(self, inObject, proto): + toStringProto = super(CompressedAmpList, self).toStringProto + return zlib.compress(toStringProto(inObject, proto)) + + def fromStringProto(self, inString, proto): + fromStringProto = super(CompressedAmpList, self).fromStringProto + return fromStringProto(zlib.decompress(inString), proto) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/boot_images.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/boot_images.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/boot_images.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/boot_images.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,105 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""RPC relating to boot images.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "import_boot_images", + "list_boot_images", + "is_import_boot_images_running", + ] + +import os +from urlparse import urlparse + +from provisioningserver import concurrency +from provisioningserver.auth import get_maas_user_gpghome +from provisioningserver.boot import tftppath +from provisioningserver.config import BOOT_RESOURCES_STORAGE +from provisioningserver.import_images import boot_resources +from provisioningserver.utils.env import environment_variables +from provisioningserver.utils.twisted import synchronous +from twisted.internet.threads import deferToThread + + +CACHED_BOOT_IMAGES = None + + +def list_boot_images(): + """List the boot images that exist on the cluster. + + This return value of this function is cached. This helps reduce the amount + of IO, as this function is called often. To update the cache call + `reload_boot_images`. 
+ """ + global CACHED_BOOT_IMAGES + if CACHED_BOOT_IMAGES is None: + CACHED_BOOT_IMAGES = tftppath.list_boot_images( + os.path.join(BOOT_RESOURCES_STORAGE, 'current')) + return CACHED_BOOT_IMAGES + + +def reload_boot_images(): + """Update the cached boot images so `list_boot_images` returns the + most up-to-date boot images list.""" + global CACHED_BOOT_IMAGES + CACHED_BOOT_IMAGES = tftppath.list_boot_images( + os.path.join(BOOT_RESOURCES_STORAGE, 'current')) + + +def get_hosts_from_sources(sources): + """Return set of hosts that are contained in the given sources.""" + hosts = set() + for source in sources: + url = urlparse(source['url']) + if url.hostname is not None: + hosts.add(url.hostname) + return hosts + + +@synchronous +def _run_import(sources, http_proxy=None, https_proxy=None): + """Run the import. + + This is function is synchronous so it must be called with deferToThread. + """ + variables = { + 'GNUPGHOME': get_maas_user_gpghome(), + } + if http_proxy is not None: + variables['http_proxy'] = http_proxy + if https_proxy is not None: + variables['https_proxy'] = https_proxy + # Communication to the sources and loopback should not go through proxy. + no_proxy_hosts = ["localhost", "127.0.0.1", "::1"] + no_proxy_hosts += list(get_hosts_from_sources(sources)) + variables['no_proxy'] = ','.join(no_proxy_hosts) + with environment_variables(variables): + boot_resources.import_images(sources) + + # Update the boot images cache so `list_boot_images` returns the + # correct information. 
+ reload_boot_images() + + +def import_boot_images(sources, http_proxy=None, https_proxy=None): + """Imports the boot images from the given sources.""" + lock = concurrency.boot_images + if not lock.locked: + return lock.run( + deferToThread, _run_import, sources, + http_proxy=http_proxy, https_proxy=https_proxy) + + +def is_import_boot_images_running(): + """Return True if the import process is currently running.""" + return concurrency.boot_images.locked diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/cluster.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/cluster.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/cluster.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/cluster.py 2015-07-10 01:27:14.000000000 +0000 @@ -16,34 +16,66 @@ __metaclass__ = type __all__ = [ + "Authenticate", + "ComposeCurtinNetworkPreseed", + "ConfigureDHCPv4", + "ConfigureDHCPv6", + "CreateHostMaps", "DescribePowerTypes", + "GetPreseedData", "Identify", "ListBootImages", + "ListOperatingSystems", "ListSupportedArchitectures", + "PowerOff", + "PowerOn", + "PowerQuery", + "ValidateLicenseKey", ] -from provisioningserver.rpc.arguments import StructureAsJSON -from provisioningserver.rpc.common import Identify +from provisioningserver.power.poweraction import ( + PowerActionFail, + UnknownPowerType, + ) +from provisioningserver.rpc import exceptions +from provisioningserver.rpc.arguments import ( + Bytes, + ParsedURL, + StructureAsJSON, + ) +from provisioningserver.rpc.common import ( + Authenticate, + Identify, + ) from twisted.protocols import amp class ListBootImages(amp.Command): - """List the boot images available on this cluster controller.""" + """List the boot images available on this cluster controller. 
class ListOperatingSystems(amp.Command):
    """Report the cluster's supported operating systems.

    :since: 1.7
    """

    arguments = []
    response = [
        (b"osystems", amp.AmpList([
            (b"name", amp.Unicode()),
            (b"title", amp.Unicode()),
            (b"releases", amp.AmpList([
                (b"name", amp.Unicode()),
                (b"title", amp.Unicode()),
                (b"requires_license_key", amp.Boolean()),
                (b"can_commission", amp.Boolean()),
            ])),
            (b"default_release", amp.Unicode(optional=True)),
            (b"default_commissioning_release", amp.Unicode(optional=True)),
        ])),
    ]
    errors = []


class GetOSReleaseTitle(amp.Command):
    """Get the title for the operating systems release.

    :since: 1.7
    """

    arguments = [
        (b"osystem", amp.Unicode()),
        (b"release", amp.Unicode()),
    ]
    response = [
        (b"title", amp.Unicode()),
    ]
    errors = {
        exceptions.NoSuchOperatingSystem: (
            b"NoSuchOperatingSystem"),
    }


class ValidateLicenseKey(amp.Command):
    """Validate an OS license key.

    :since: 1.7
    """

    arguments = [
        (b"osystem", amp.Unicode()),
        (b"release", amp.Unicode()),
        (b"key", amp.Unicode()),
    ]
    response = [
        (b"is_valid", amp.Boolean()),
    ]
    errors = {
        exceptions.NoSuchOperatingSystem: (
            b"NoSuchOperatingSystem"),
    }


class GetPreseedData(amp.Command):
    """Get OS-specific preseed data.

    :since: 1.7
    """

    arguments = [
        (b"osystem", amp.Unicode()),
        (b"preseed_type", amp.Unicode()),
        (b"node_system_id", amp.Unicode()),
        (b"node_hostname", amp.Unicode()),
        (b"consumer_key", amp.Unicode()),
        (b"token_key", amp.Unicode()),
        (b"token_secret", amp.Unicode()),
        (b"metadata_url", ParsedURL()),
    ]
    response = [
        (b"data", StructureAsJSON()),
    ]
    errors = {
        exceptions.NoSuchOperatingSystem: (
            b"NoSuchOperatingSystem"),
        NotImplementedError: (
            b"NotImplementedError"),
    }


class ComposeCurtinNetworkPreseed(amp.Command):
    """Generate Curtin network preseed for a node.

    :since: 1.7
    """

    arguments = [
        (b"osystem", amp.Unicode()),
        (b"config", StructureAsJSON()),
        (b"disable_ipv4", amp.Boolean()),
    ]
    response = [
        (b"data", StructureAsJSON()),
    ]
    errors = {
        exceptions.NoSuchOperatingSystem: b"NoSuchOperatingSystem",
    }


class _Power(amp.Command):
    """Base class for power control commands.

    :since: 1.7
    """

    arguments = [
        (b"system_id", amp.Unicode()),
        (b"hostname", amp.Unicode()),
        (b"power_type", amp.Unicode()),
        # We can't define a tighter schema here because this is a highly
        # variable bag of arguments from a variety of sources.
        (b"context", StructureAsJSON()),
    ]
    response = []
    errors = {
        UnknownPowerType: (
            b"UnknownPowerType"),
        NotImplementedError: (
            b"NotImplementedError"),
        PowerActionFail: (
            b"PowerActionFail"),
        exceptions.PowerActionAlreadyInProgress: (
            b"PowerActionAlreadyInProgress"),
    }


class PowerOn(_Power):
    """Turn a node's power on.

    :since: 1.7
    """


class PowerOff(_Power):
    """Turn a node's power off.

    :since: 1.7
    """


class PowerQuery(_Power):
    """Query a node's power state.

    :since: 1.7
    """
    response = [
        (b"state", amp.Unicode()),
    ]


class _ConfigureDHCP(amp.Command):
    """Configure a DHCP server.

    :since: 1.7
    """
    arguments = [
        (b"omapi_key", amp.Unicode()),
        (b"subnet_configs", amp.AmpList([
            (b"subnet", amp.Unicode()),
            (b"subnet_mask", amp.Unicode()),
            (b"subnet_cidr", amp.Unicode()),
            (b"broadcast_ip", amp.Unicode()),
            (b"interface", amp.Unicode()),
            (b"router_ip", amp.Unicode()),
            (b"dns_servers", amp.Unicode()),
            (b"ntp_server", amp.Unicode()),
            (b"domain_name", amp.Unicode()),
            (b"ip_range_low", amp.Unicode()),
            (b"ip_range_high", amp.Unicode()),
        ])),
    ]
    response = []
    errors = {exceptions.CannotConfigureDHCP: b"CannotConfigureDHCP"}


class ConfigureDHCPv4(_ConfigureDHCP):
    """Configure the DHCPv4 server.

    :since: 1.7
    """


class ConfigureDHCPv6(_ConfigureDHCP):
    """Configure the DHCPv6 server.

    :since: 1.7
    """


class CreateHostMaps(amp.Command):
    """Create host maps in the DHCP server's configuration.

    :since: 1.7
    """

    arguments = [
        (b"mappings", amp.AmpList([
            (b"ip_address", amp.Unicode()),
            (b"mac_address", amp.Unicode()),
        ])),
        (b"shared_key", amp.Unicode()),
    ]
    response = []
    errors = {
        exceptions.CannotCreateHostMap: (
            b"CannotCreateHostMap"),
    }


class RemoveHostMaps(amp.Command):
    """Remove host maps from the DHCP server's configuration.

    :since: 1.7
    """

    arguments = [
        (b"ip_addresses", amp.ListOf(amp.Unicode())),
        (b"shared_key", amp.Unicode()),
    ]
    response = []
    errors = {
        exceptions.CannotRemoveHostMap: (
            b"CannotRemoveHostMap"),
    }


class ImportBootImages(amp.Command):
    """Import boot images and report the final
    boot images that exist on the cluster.

    :since: 1.7
    """

    arguments = [
        (b"sources", amp.AmpList(
            [(b"url", amp.Unicode()),
             (b"keyring_data", Bytes()),
             (b"selections", amp.AmpList(
                 [(b"os", amp.Unicode()),
                  (b"release", amp.Unicode()),
                  (b"arches", amp.ListOf(amp.Unicode())),
                  (b"subarches", amp.ListOf(amp.Unicode())),
                  (b"labels", amp.ListOf(amp.Unicode()))]))])),
        (b"http_proxy", ParsedURL(optional=True)),
        (b"https_proxy", ParsedURL(optional=True)),
    ]
    response = []
    errors = []


class StartMonitors(amp.Command):
    """Starts monitors(s) on the cluster.

    :since: 1.7
    """

    arguments = [
        (b"monitors", amp.AmpList(
            [(b"deadline", amp.DateTime()),
             (b"context", StructureAsJSON()),
             (b"id", amp.Unicode()),
             ]))
    ]
    response = []
    errors = []


class CancelMonitor(amp.Command):
    """Cancels an existing monitor on the cluster.

    :since: 1.7
    """

    arguments = [
        (b"id", amp.Unicode()),
    ]
    response = []
    # Fixed: this attribute was misspelled "error", leaving a dead
    # attribute that amp.Command never consulted.
    errors = []


class EvaluateTag(amp.Command):
    """Evaluate a tag against all of the cluster's nodes.

    :since: 1.7
    """

    arguments = [
        (b"tag_name", amp.Unicode()),
        (b"tag_definition", amp.Unicode()),
        (b"tag_nsmap", amp.AmpList([
            (b"prefix", amp.Unicode()),
            (b"uri", amp.Unicode()),
        ])),
        # A 3-part credential string for the web API.
        (b"credentials", amp.Unicode()),
    ]
    response = []
    errors = []


class AddVirsh(amp.Command):
    """Probe for and enlist virsh VMs attached to the cluster.

    :since: 1.7
    """

    arguments = [
        (b"poweraddr", amp.Unicode()),
        (b"password", amp.Unicode(optional=True)),
        (b"prefix_filter", amp.Unicode(optional=True)),
    ]
    response = []
    errors = []


class AddSeaMicro15k(amp.Command):
    """Probe for and enlist seamicro15k machines attached to the cluster.

    :since: 1.7
    """
    arguments = [
        (b"mac", amp.Unicode()),
        (b"username", amp.Unicode()),
        (b"password", amp.Unicode()),
        (b"power_control", amp.Unicode(optional=True)),
    ]
    response = []
    errors = {
        exceptions.NoIPFoundForMACAddress: b"NoIPFoundForMACAddress",
    }


class EnlistNodesFromMSCM(amp.Command):
    """Probe for and enlist mscm machines attached to the cluster.

    :since: 1.7
    """
    arguments = [
        (b"host", amp.Unicode()),
        (b"username", amp.Unicode()),
        (b"password", amp.Unicode()),
    ]
    response = []
    errors = {}


class EnlistNodesFromUCSM(amp.Command):
    """Probe for and enlist ucsm machines attached to the cluster.

    :since: 1.7
    """
    arguments = [
        (b"url", amp.Unicode()),
        (b"username", amp.Unicode()),
        (b"password", amp.Unicode()),
    ]
    response = []
    errors = {}


class IsImportBootImagesRunning(amp.Command):
    """Check if the import boot images task is running on the cluster.

    :since: 1.7
    """
    arguments = []
    response = [
        (b"running", amp.Boolean()),
    ]
    errors = {}
provisioningserver.drivers import ( ArchitectureRegistry, PowerTypeRegistry, ) +from provisioningserver.drivers.hardware.mscm import probe_and_enlist_mscm +from provisioningserver.drivers.hardware.seamicro import ( + probe_seamicro15k_and_enlist, + ) +from provisioningserver.drivers.hardware.ucsm import probe_and_enlist_ucsm +from provisioningserver.drivers.hardware.virsh import probe_virsh_and_enlist +from provisioningserver.logger.log import get_maas_logger +from provisioningserver.logger.utils import log_call +from provisioningserver.network import discover_networks from provisioningserver.rpc import ( cluster, common, + dhcp, exceptions, region, ) +from provisioningserver.rpc.boot_images import ( + import_boot_images, + is_import_boot_images_running, + list_boot_images, + ) +from provisioningserver.rpc.common import RPCProtocol +from provisioningserver.rpc.dhcp import ( + create_host_maps, + remove_host_maps, + ) from provisioningserver.rpc.interfaces import IConnection -from twisted.application.internet import ( - StreamServerEndpointService, - TimerService, +from provisioningserver.rpc.monitors import ( + cancel_monitor, + start_monitors, + ) +from provisioningserver.rpc.osystems import ( + compose_curtin_network_preseed, + gen_operating_systems, + get_os_release_title, + get_preseed_data, + validate_license_key, + ) +from provisioningserver.rpc.power import ( + get_power_state, + maybe_change_power_state, + ) +from provisioningserver.rpc.tags import evaluate_tag +from provisioningserver.security import ( + calculate_digest, + get_shared_secret_from_filesystem, + ) +from provisioningserver.utils.network import find_ip_via_arp +from provisioningserver.utils.twisted import DeferredValue +from twisted.application.internet import TimerService +from twisted.internet.defer import ( + inlineCallbacks, + returnValue, ) -from twisted.internet import ssl -from twisted.internet.defer import inlineCallbacks from twisted.internet.endpoints import ( connectProtocol, 
TCP4ClientEndpoint, - TCP4ServerEndpoint, ) -from twisted.internet.error import ConnectError -from twisted.internet.protocol import Factory -from twisted.protocols import amp -from twisted.python import ( - filepath, - log, +from twisted.internet.error import ( + ConnectError, + ConnectionClosed, ) +from twisted.internet.threads import deferToThread +from twisted.protocols import amp +from twisted.python import log +from twisted.web import http +import twisted.web.client from twisted.web.client import getPage from zope.interface import implementer -class Cluster(amp.AMP, object): +maaslog = get_maas_logger("rpc.cluster") + + +def catch_probe_and_enlist_error(name, failure): + """Logs any errors when trying to probe and enlist a chassis.""" + maaslog.error( + "Failed to probe and enlist %s nodes: %s", + name, failure.getErrorMessage()) + return None + + +class Cluster(RPCProtocol): """The RPC protocol supported by a cluster controller. This can be used on the client or server end of a connection; once a @@ -77,6 +133,13 @@ """ return {b"ident": get_cluster_uuid().decode("ascii")} + @cluster.Authenticate.responder + def authenticate(self, message): + secret = get_shared_secret_from_filesystem() + salt = urandom(16) # 16 bytes of high grade noise. + digest = calculate_digest(secret, message, salt) + return {"digest": digest, "salt": salt} + @cluster.ListBootImages.responder def list_boot_images(self): """list_boot_images() @@ -84,9 +147,29 @@ Implementation of :py:class:`~provisioningserver.rpc.cluster.ListBootImages`. """ - images = tftppath.list_boot_images( - Config.load_from_cache()['tftp']['resource_root']) - return {"images": images} + return {"images": list_boot_images()} + + @cluster.ImportBootImages.responder + def import_boot_images(self, sources, http_proxy=None, https_proxy=None): + """import_boot_images() + + Implementation of + :py:class:`~provisioningserver.rpc.cluster.ImportBootImages`. 
    @cluster.IsImportBootImagesRunning.responder
    def is_import_boot_images_running(self):
        """is_import_boot_images_running()

        Implementation of
        :py:class:`~provisioningserver.rpc.cluster.IsImportBootImagesRunning`.
        """
        return {"running": is_import_boot_images_running()}

    @cluster.ListOperatingSystems.responder
    def list_operating_systems(self):
        """list_operating_systems()

        Implementation of
        :py:class:`~provisioningserver.rpc.cluster.ListOperatingSystems`.
        """
        return {"osystems": gen_operating_systems()}

    @cluster.GetOSReleaseTitle.responder
    def get_os_release_title(self, osystem, release):
        """get_os_release_title()

        Implementation of
        :py:class:`~provisioningserver.rpc.cluster.GetOSReleaseTitle`.
        """
        return {"title": get_os_release_title(osystem, release)}

    @cluster.ValidateLicenseKey.responder
    def validate_license_key(self, osystem, release, key):
        """validate_license_key()

        Implementation of
        :py:class:`~provisioningserver.rpc.cluster.ValidateLicenseKey`.
        """
        return {"is_valid": validate_license_key(osystem, release, key)}

    @cluster.GetPreseedData.responder
    def get_preseed_data(
            self, osystem, preseed_type, node_system_id, node_hostname,
            consumer_key, token_key, token_secret, metadata_url):
        """get_preseed_data()

        Implementation of
        :py:class:`~provisioningserver.rpc.cluster.GetPreseedData`.
        """
        return {
            "data": get_preseed_data(
                osystem, preseed_type, node_system_id, node_hostname,
                consumer_key, token_key, token_secret, metadata_url),
        }

    @cluster.ComposeCurtinNetworkPreseed.responder
    def compose_curtin_network_preseed(self, osystem, config, disable_ipv4):
        """compose_curtin_network_preseed()

        Implementation of
        :py:class:`~provisioningserver.rpc.cluster.ComposeCurtinNetworkPreseed`
        """
        # `config` arrives as a decoded JSON structure; the interface
        # pairs come back as lists and must be converted to tuples for
        # the composer. Missing keys fall back to empty containers.
        interfaces = config.get('interfaces', [])
        interfaces = [tuple(interface) for interface in interfaces]
        auto_interfaces = config.get('auto_interfaces', [])
        ips_mapping = config.get('ips_mapping', {})
        gateways_mapping = config.get('gateways_mapping', {})
        nameservers = config.get('nameservers', [])
        netmasks = config.get('netmasks', {})
        return {
            'data': compose_curtin_network_preseed(
                osystem, interfaces, auto_interfaces, ips_mapping=ips_mapping,
                gateways_mapping=gateways_mapping, disable_ipv4=disable_ipv4,
                nameservers=nameservers, netmasks=netmasks),
        }

    @log_call(level=logging.DEBUG)
    @cluster.PowerOn.responder
    def power_on(self, system_id, hostname, power_type, context):
        """Turn a node on."""
        d = maybe_change_power_state(
            system_id, hostname, power_type, power_change='on',
            context=context)
        # A successful power change has an empty AMP response.
        d.addCallback(lambda _: {})
        return d

    @log_call(level=logging.DEBUG)
    @cluster.PowerOff.responder
    def power_off(self, system_id, hostname, power_type, context):
        """Turn a node off."""
        d = maybe_change_power_state(
            system_id, hostname, power_type, power_change='off',
            context=context)
        # A successful power change has an empty AMP response.
        d.addCallback(lambda _: {})
        return d

    @cluster.PowerQuery.responder
    def power_query(self, system_id, hostname, power_type, context):
        """Query a node's power state.

        Implementation of
        :py:class:`~provisioningserver.rpc.cluster.PowerQuery`.
        """
        d = get_power_state(
            system_id, hostname, power_type, context=context)
        d.addCallback(lambda x: {'state': x})
        return d
+ deferToThread, dhcp.configure, server, subnet_configs) + d.addCallback(lambda _: {}) + return d + + @cluster.ConfigureDHCPv6.responder + def configure_dhcpv6(self, omapi_key, subnet_configs): + server = dhcp.DHCPv6Server(omapi_key) + d = concurrency.dhcp.run( + deferToThread, dhcp.configure, server, subnet_configs) + d.addCallback(lambda _: {}) + return d + + @cluster.CreateHostMaps.responder + def create_host_maps(self, mappings, shared_key): + d = concurrency.dhcp.run( + deferToThread, create_host_maps, mappings, shared_key) + d.addCallback(lambda _: {}) + return d + + @cluster.RemoveHostMaps.responder + def remove_host_maps(self, ip_addresses, shared_key): + d = concurrency.dhcp.run( + deferToThread, remove_host_maps, ip_addresses, shared_key) + d.addCallback(lambda _: {}) + return d + + @cluster.StartMonitors.responder + def start_monitors(self, monitors): + start_monitors(monitors) + return {} + + @cluster.CancelMonitor.responder + def cancel_timer(self, id): + cancel_monitor(id) + return {} + @amp.StartTLS.responder def get_tls_parameters(self): """get_tls_parameters() Implementation of :py:class:`~twisted.protocols.amp.StartTLS`. """ - # TODO: Obtain certificates from a config store. - testing = filepath.FilePath(__file__).sibling("testing") - with testing.child("cluster.crt").open() as fin: - tls_localCertificate = ssl.PrivateCertificate.loadPEM(fin.read()) - with testing.child("trust.crt").open() as fin: - tls_verifyAuthorities = [ - ssl.Certificate.loadPEM(fin.read()), - ] - return { - "tls_localCertificate": tls_localCertificate, - "tls_verifyAuthorities": tls_verifyAuthorities, - } + try: + from provisioningserver.rpc.testing import tls + except ImportError: + # This is not a development/test environment. + # XXX: Return production TLS parameters. 
+ return {} + else: + return tls.get_tls_parameters_for_cluster() + @cluster.EvaluateTag.responder + def evaluate_tag(self, tag_name, tag_definition, tag_nsmap, credentials): + """evaluate_tag() -class ClusterService(StreamServerEndpointService): - """A cluster controller RPC service. + Implementation of + :py:class:`~provisioningserver.rpc.cluster.EvaluateTag`. + """ + # It's got to run in a thread because it does blocking IO. + d = deferToThread( + evaluate_tag, tag_name, tag_definition, + # Transform tag_nsmap into a format that LXML likes. + {entry["prefix"]: entry["uri"] for entry in tag_nsmap}, + # Parse the credential string into a 3-tuple. + convert_string_to_tuple(credentials)) + return d.addCallback(lambda _: {}) + + @cluster.AddVirsh.responder + def add_virsh(self, poweraddr, password, prefix_filter): + """add_virsh() - This is a service - in the Twisted sense - that exposes the - ``Cluster`` protocol on the given port. - """ + Implementation of :py:class:`~provisioningserver.rpc.cluster.AddVirsh`. + """ + d = deferToThread( + probe_virsh_and_enlist, + poweraddr, password, prefix_filter) + d.addErrback(partial(catch_probe_and_enlist_error, "virsh")) + return {} + + @cluster.AddSeaMicro15k.responder + def add_seamicro15k(self, mac, username, password, power_control=None): + """add_virsh() - def __init__(self, reactor, port): - super(ClusterService, self).__init__( - TCP4ServerEndpoint(reactor, port), - Factory.forProtocol(Cluster)) + Implementation of + :py:class:`~provisioningserver.rpc.cluster.AddSeaMicro15k`. 
+ """ + ip = find_ip_via_arp(mac) + if ip is not None: + d = deferToThread( + probe_seamicro15k_and_enlist, + ip, username, password, + power_control=power_control) + d.addErrback( + partial(catch_probe_and_enlist_error, "SeaMicro 15000")) + else: + message = "Couldn't find IP address for MAC %s" % mac + maaslog.warning(message) + raise exceptions.NoIPFoundForMACAddress(message) + return {} + + @cluster.EnlistNodesFromMSCM.responder + def enlist_nodes_from_mscm(self, host, username, password): + """enlist_nodes_from_mscm() + + Implemention of + :py:class:`~provisioningserver.rpc.cluster.EnlistNodesFromMSCM`. + """ + d = deferToThread( + probe_and_enlist_mscm, + host, username, password) + d.addErrback(partial(catch_probe_and_enlist_error, "Moonshot")) + return {} + + @cluster.EnlistNodesFromUCSM.responder + def enlist_nodes_from_ucsm(self, url, username, password): + """enlist_nodes_from_ucsm() + + Implemention of + :py:class:`~provisioningserver.rpc.cluster.EnlistNodesFromUCSM`. + """ + d = deferToThread( + probe_and_enlist_ucsm, + url, username, password) + d.addErrback(partial(catch_probe_and_enlist_error, "UCS")) + return {} @implementer(IConnection) @@ -155,6 +426,19 @@ :ivar service: A reference to the :class:`ClusterClientService` that made self. + :ivar authenticated: A py:class:`DeferredValue` that will be set when the + region has been authenticated. If the region has been authenticated, + this will be ``True``, otherwise it will be ``False``. If there was an + error, it will return a :py:class:`twisted.python.failure.Failure` via + errback. + + :ivar ready: A py:class:`DeferredValue` that will be set when this + connection is up and has performed authentication on the region. 
If + everything has gone smoothly it will be set to the name of the + event-loop connected to, otherwise it will be set to: `RuntimeError` + if the client service is not running; `KeyError` if there's already a + live connection for this event-loop; or `AuthenticationFailed` if, + guess, the authentication failed. """ address = None @@ -166,20 +450,119 @@ self.address = address self.eventloop = eventloop self.service = service + # Events for this protocol's life-cycle. + self.authenticated = DeferredValue() + self.ready = DeferredValue() @property def ident(self): """The ident of the remote event-loop.""" return self.eventloop + @inlineCallbacks + def authenticateRegion(self): + """Authenticate the region.""" + secret = get_shared_secret_from_filesystem() + message = urandom(16) # 16 bytes of the finest. + response = yield self.callRemote( + region.Authenticate, message=message) + salt, digest = response["salt"], response["digest"] + digest_local = calculate_digest(secret, message, salt) + returnValue(digest == digest_local) + + def registerWithRegion(self): + uuid = get_cluster_uuid() + networks = discover_networks() + url = get_maas_url() + + def cb_register(_): + log.msg( + "Cluster '%s' registered (via %s)." + % (uuid, self.eventloop)) + return True + + def eb_register(failure): + failure.trap(exceptions.CannotRegisterCluster) + log.msg( + "Cluster '%s' REJECTED by the region (via %s)." + % (uuid, self.eventloop)) + return False + + d = self.callRemote( + region.Register, uuid=uuid, networks=networks, + url=urlparse(url)) + return d.addCallbacks(cb_register, eb_register) + + @inlineCallbacks + def performHandshake(self): + d_authenticate = self.authenticateRegion() + self.authenticated.observe(d_authenticate) + authenticated = yield d_authenticate + + if authenticated: + log.msg("Event-loop '%s' authenticated." 
% self.ident) + registered = yield self.registerWithRegion() + if registered: + self.service.connections[self.eventloop] = self + self.ready.set(self.eventloop) + else: + self.transport.loseConnection() + self.ready.fail( + exceptions.RegistrationFailed( + "Event-loop '%s' rejected registration." + % self.ident)) + else: + log.msg( + "Event-loop '%s' FAILED authentication; " + "dropping connection." % self.ident) + self.transport.loseConnection() + self.ready.fail( + exceptions.AuthenticationFailed( + "Event-loop '%s' failed authentication." + % self.eventloop)) + + def handshakeSucceeded(self, result): + """The handshake (identify and authenticate) succeeded. + + This does *NOT* mean that the region was successfully authenticated, + merely that the process of authentication did not encounter an error. + """ + + def handshakeFailed(self, failure): + """The handshake (identify and authenticate) failed.""" + if failure.check(ConnectionClosed): + # There has been a disconnection, clean or otherwise. There's + # nothing we can do now, so do nothing. The reason will have been + # logged elsewhere. + self.ready.fail(failure) + else: + log.err( + failure, "Event-loop '%s' handshake failed; " + "dropping connection." % self.ident) + self.transport.loseConnection() + self.ready.fail(failure) + def connectionMade(self): super(ClusterClient, self).connectionMade() + if not self.service.running: + log.msg( + "Event-loop '%s' will be disconnected; the cluster's " + "client service is not running." % self.ident) self.transport.loseConnection() + self.authenticated.set(None) + self.ready.fail(RuntimeError("Service not running.")) elif self.eventloop in self.service.connections: + log.msg( + "Event-loop '%s' is already connected; " + "dropping connection." % self.ident) self.transport.loseConnection() + self.authenticated.set(None) + self.ready.fail(KeyError( + "Event-loop '%s' already connected." 
% self.eventloop)) else: - self.service.connections[self.eventloop] = self + return self.performHandshake().addCallbacks( + self.handshakeSucceeded, self.handshakeFailed) def connectionLost(self, reason): if self.eventloop in self.service.connections: @@ -196,7 +579,7 @@ # the connection. Here we check that the remote event-loop is # who we expected it to be. response = yield self.callRemote(region.Identify) - remote_name = response.get("name") + remote_name = response.get("ident") if remote_name != self.eventloop: log.msg( "The remote event-loop identifies itself as %s, but " @@ -209,6 +592,49 @@ log.msg("Peer certificate: %r" % self.peerCertificate) +class PatchedURI(twisted.web.client._URI): + + @classmethod + def fromBytes(cls, uri, defaultPort=None): + """Patched replacement for `twisted.web.client._URI.fromBytes`. + + The Twisted version of this function breaks when you give it a URL + whose netloc is based on an IPv6 address. + """ + uri = uri.strip() + scheme, netloc, path, params, query, fragment = http.urlparse(uri) + + if defaultPort is None: + scheme_ports = { + 'https': 443, + 'http': 80, + } + defaultPort = scheme_ports.get(scheme, 80) + + if '[' in netloc: + # IPv6 address. This is complicated. + parsed_netloc = re.match( + '\\[(?P[0-9A-Fa-f:.]+)\\]([:](?P[0-9]+))?$', + netloc) + host, port = parsed_netloc.group('host', 'port') + elif ':' in netloc: + # IPv4 address or hostname, with port spec. This is easy. + host, port = netloc.split(':') + else: + # IPv4 address or hostname, without port spec. This is trivial. + host = netloc + port = None + + if port is None: + port = defaultPort + try: + port = int(port) + except ValueError: + port = defaultPort + + return cls(scheme, netloc, host, port, path, params, query, fragment) + + class ClusterClientService(TimerService, object): """A cluster controller RPC client service. @@ -219,18 +645,34 @@ :ivar connections: A mapping of eventloop names to protocol instances connected to it. 
+ :ivar time_started: Records the time that `startService` was last called, + or `None` if it hasn't yet. """ INTERVAL_LOW = 2 # seconds. INTERVAL_MID = 10 # seconds. INTERVAL_HIGH = 30 # seconds. + time_started = None + def __init__(self, reactor): super(ClusterClientService, self).__init__( self._calculate_interval(None, None), self.update) self.connections = {} self.clock = reactor + # XXX jtv 2014-09-23, bug=1372767: Fix + # twisted.web.client._URI.fromBytes to handle IPv6 addresses. + # A `getPage` call on Twisted's web client breaks if you give it a + # URL with an IPv6 address, at the point where `_makeGetterFactory` + # calls `fromBytes`. That last function assumes that a colon can only + # occur in the URL's netloc portion as part of a port specification. + twisted.web.client._URI = PatchedURI + + def startService(self): + self.time_started = self.clock.seconds() + super(ClusterClientService, self).startService() + def getClient(self): """Returns a :class:`common.Client` connected to a region. @@ -254,18 +696,29 @@ """ try: info_url = self._get_rpc_info_url() - info_page = yield getPage(info_url) - info = json.loads(info_page) + info = yield self._fetch_rpc_info(info_url) eventloops = info["eventloops"] - yield self._update_connections(eventloops) + if eventloops is None: + # This means that the region process we've just asked about + # RPC event-loop endpoints is not running the RPC advertising + # service. It could be just starting up for example. + log.msg("Region is not advertising RPC endpoints.") + else: + yield self._update_connections(eventloops) except ConnectError as error: self._update_interval(None, len(self.connections)) - log.msg("Region not available: %s" % (error,)) + log.msg( + "Region not available: %s (While requesting RPC info at %s)." 
+ % (error, info_url)) except: self._update_interval(None, len(self.connections)) log.err() else: - self._update_interval(len(eventloops), len(self.connections)) + if eventloops is None: + # The advertising service on the region was not running yet. + self._update_interval(None, len(self.connections)) + else: + self._update_interval(len(eventloops), len(self.connections)) @staticmethod def _get_rpc_info_url(): @@ -275,6 +728,10 @@ url = url.geturl() return ascii_url(url) + @staticmethod + def _fetch_rpc_info(url): + return getPage(url).addCallback(json.loads) + def _calculate_interval(self, num_eventloops, num_connections): """Calculate the update interval. @@ -282,11 +739,21 @@ connections, so that this can quickly obtain its first connection. + The interval is also `INTERVAL_LOW` for a time after the service + starts. This helps to get everything connected quickly when the + cluster is started at a similar time to the region. + The interval changes to `INTERVAL_MID` seconds when there are some connections, but fewer than there are event-loops. After that it drops back to `INTERVAL_HIGH` seconds. """ + if self.time_started is not None: + time_running = self.clock.seconds() - self.time_started + if time_running < self.INTERVAL_HIGH: + # This service has recently started; keep trying regularly. + return self.INTERVAL_LOW + if num_eventloops is None: # The region is not available; keep trying regularly. 
return self.INTERVAL_LOW diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/common.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/common.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/common.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/common.py 2015-07-10 01:27:14.000000000 +0000 @@ -13,21 +13,65 @@ __metaclass__ = type __all__ = [ - "Identify", + "Authenticate", "Client", + "Identify", + "RPCProtocol", ] from provisioningserver.rpc.interfaces import IConnection -from provisioningserver.utils import asynchronous +from provisioningserver.utils.twisted import asynchronous +from twisted.internet.defer import Deferred from twisted.protocols import amp class Identify(amp.Command): - """Request the identity of the remote side, e.g. its UUID.""" + """Request the identity of the remote side, e.g. its UUID. + + :since: 1.5 + """ response = [(b"ident", amp.Unicode())] +class Authenticate(amp.Command): + """Authenticate the remote side. + + The procedure is as follows: + + - When establishing a new connection, the region and the cluster call + `Authenticate` on each other, passing a random chunk of data in + `message`. This message must be unique to avoid replay attacks. + + - The remote side adds some salt to the message, and calculates an HMAC + digest, keyed with the shared secret. + + The salt is intended to prevent replay attacks: it prevents an intruder + from authenticating itself by calling `Authenticate` on the caller (or + another endpoint in the same MAAS installation) and sending the same + message, receiving the digest and passing it back to the caller. + + - The remote side returns this digest and the salt. The caller performs + the same calculation, and compares the digests. + + - If the digests match, the connection is put into rotation. + + - If the digests do not match, the connection is closed immediately, and + an error is logged. 
+ + :since: 1.7 + """ + + arguments = [ + (b"message", amp.String()), + ] + response = [ + (b"digest", amp.String()), + (b"salt", amp.String()), # Is 'salt' the right term here? + ] + errors = [] + + class Client: """Wrapper around an :class:`amp.AMP` instance. @@ -48,7 +92,36 @@ return self._conn.ident @asynchronous - def __call__(self, cmd, **kwargs): + def __call__(self, cmd, *args, **kwargs): + """Call a remote RPC method. + + This is how the client is normally used. + + :note: + Though the call signature shows positional arguments, their use is + an error. They're in the signature is so this method can detect + them and provide a better error message than that from Python. + Python's error message when arguments don't match the call's + signature is not great at best, but it also makes it hard to + figure out the receiver when the `TypeError` is raised in a + different stack from the caller's, e.g. when calling into the + Twisted reactor from a thread. + + :param cmd: The `amp.Command` child class representing the remote + method to be invoked. + :param kwargs: Any parameters to the remote method. Only keyword + arguments are accepted. + :return: A deferred result. Call its `wait` method (with a timeout + in seconds) to block on the call's completion. + """ + if len(args) != 0: + receiver_name = "%s.%s" % ( + self.__module__, self.__class__.__name__) + raise TypeError( + "%s called with %d positional arguments, %r, but positional " + "arguments are not supported. Usage: client(command, arg1=" + "value1, ...)" % (receiver_name, len(args), args)) + return self._conn.callRemote(cmd, **kwargs) @asynchronous @@ -68,3 +141,31 @@ def __hash__(self): return hash(self._conn) + + +class RPCProtocol(amp.AMP, object): + """A specialisation of `amp.AMP`. + + It's hard to track exactly when an `amp.AMP` protocol is connected to its + transport, or disconnected, from the "outside". 
It's necessary to subclass + and override `connectionMade` and `connectionLost` and signal from there, + which is what this class does. + + :ivar onConnectionMade: A `Deferred` that fires when `connectionMade` has + been called, i.e. this protocol is now connected. + :ivar onConnectionLost: A `Deferred` that fires when `connectionLost` has + been called, i.e. this protocol is no longer connected. + """ + + def __init__(self): + super(RPCProtocol, self).__init__() + self.onConnectionMade = Deferred() + self.onConnectionLost = Deferred() + + def connectionMade(self): + super(RPCProtocol, self).connectionMade() + self.onConnectionMade.callback(None) + + def connectionLost(self, reason): + super(RPCProtocol, self).connectionLost(reason) + self.onConnectionLost.callback(None) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/dhcp.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/dhcp.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/dhcp.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/dhcp.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,245 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""RPC helpers relating to DHCP.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "configure", + "create_host_maps", + "DHCPv4Server", + "DHCPv6Server", + "remove_host_maps", +] + +from abc import ( + ABCMeta, + abstractmethod, + abstractproperty, + ) + +from provisioningserver.dhcp import control +from provisioningserver.dhcp.config import get_config +from provisioningserver.dhcp.omshell import Omshell +from provisioningserver.logger import get_maas_logger +from provisioningserver.rpc.exceptions import ( + CannotConfigureDHCP, + CannotCreateHostMap, + CannotRemoveHostMap, + ) +from provisioningserver.utils.fs import sudo_write_file +from provisioningserver.utils.shell import ExternalProcessError +from provisioningserver.utils.twisted import synchronous + + +maaslog = get_maas_logger("dhcp") + +# Location of the DHCPv4 configuration file. +DHCPv4_CONFIG_FILE = '/etc/maas/dhcpd.conf' + +# Location of the DHCPv4 interfaces file. +DHCPv4_INTERFACES_FILE = '/var/lib/maas/dhcpd-interfaces' + +# Location of the DHCPv6 configuration file. +DHCPv6_CONFIG_FILE = '/etc/maas/dhcpd6.conf' + +# Location of the DHCPv6 interfaces file. +DHCPv6_INTERFACES_FILE = '/var/lib/maas/dhcpd6-interfaces' + +# Message to put in the DHCP config file when the DHCP server gets stopped. +DISABLED_DHCP_SERVER = "# DHCP server stopped and disabled." + + +class DHCPServer: + """Represents the settings and controls for a DHCP server. + + :cvar descriptive_name: A name to use for this server in human-readable + texts. + :cvar template_basename: The base filename for the template to use when + generating configuration for this server. + :cvar interfaces_filename: The full path and filename for the server's + interfaces file. + :cvar config_filename: The full path and filename for the server's + configuration file. + :ivar omapi_key: The OMAPI secret key for the server. 
+ """ + + __metaclass__ = ABCMeta + + descriptive_name = abstractproperty() + template_basename = abstractproperty() + interfaces_filename = abstractproperty() + config_filename = abstractproperty() + + def __init__(self, omapi_key): + super(DHCPServer, self).__init__() + self.omapi_key = omapi_key + + @abstractmethod + def stop(self): + """Stop the DHCP server.""" + + @abstractmethod + def restart(self): + """Restart the DHCP server.""" + + +@synchronous +def configure(server, subnet_configs): + """Configure the DHCPv6/DHCPv4 server, and restart it as appropriate. + + :param server: A `DHCPServer` instance. + :param subnet_configs: List of dicts with subnet parameters for each + subnet for which the DHCP server should serve DHCP. If no subnets + are defined, the DHCP server will be stopped. + """ + stopping = len(subnet_configs) == 0 + + if stopping: + dhcpd_config = DISABLED_DHCP_SERVER + else: + dhcpd_config = get_config( + server.template_basename, omapi_key=server.omapi_key, + dhcp_subnets=subnet_configs) + + interfaces = {subnet['interface'] for subnet in subnet_configs} + interfaces_config = ' '.join(sorted(interfaces)) + + try: + sudo_write_file(server.config_filename, dhcpd_config) + sudo_write_file(server.interfaces_filename, interfaces_config) + except ExternalProcessError as e: + # ExternalProcessError.__unicode__ contains a generic failure + # message as well as the command and its error output. On the + # other hand, ExternalProcessError.output_as_unicode contains just + # the error output which is probably the best information on what + # went wrong. Log the full error information, but keep the + # exception message short and to the point. 
+ maaslog.error( + "Could not rewrite %s server configuration (for network " + "interfaces %s): %s", server.descriptive_name, + interfaces_config, unicode(e)) + raise CannotConfigureDHCP( + "Could not rewrite %s server configuration: %s" % ( + server.descriptive_name, e.output_as_unicode)) + + if stopping: + try: + server.stop() + except ExternalProcessError as e: + maaslog.error( + "%s server failed to stop: %s", server.descriptive_name, + unicode(e)) + raise CannotConfigureDHCP( + "%s server failed to stop: %s" % ( + server.descriptive_name, e.output_as_unicode)) + else: + try: + server.restart() + except ExternalProcessError as e: + maaslog.error( + "%s server failed to restart (for network interfaces " + "%s): %s", server.descriptive_name, interfaces_config, + unicode(e)) + raise CannotConfigureDHCP( + "%s server failed to restart: %s" % ( + server.descriptive_name, e.output_as_unicode)) + + +class DHCPv4Server(DHCPServer): + """Represents the settings and controls for a DHCPv4 server. + + See `DHCPServer`. + """ + + descriptive_name = "DHCPv4" + template_basename = 'dhcpd.conf.template' + interfaces_filename = DHCPv4_INTERFACES_FILE + config_filename = DHCPv4_CONFIG_FILE + + def stop(self): + """Stop the DHCPv4 server.""" + control.stop_dhcpv4() + + def restart(self): + """Restart the DHCPv4 server.""" + control.restart_dhcpv4() + + +class DHCPv6Server(DHCPServer): + """Represents the settings and controls for a DHCPv6 server. + + See `DHCPServer`. + """ + + descriptive_name = "DHCPv6" + template_basename = 'dhcpd6.conf.template' + interfaces_filename = DHCPv6_INTERFACES_FILE + config_filename = DHCPv6_CONFIG_FILE + + def stop(self): + """Stop the DHCPv6 server.""" + control.stop_dhcpv6() + + def restart(self): + """Restart the DHCPv6 server.""" + control.restart_dhcpv6() + + +@synchronous +def create_host_maps(mappings, shared_key): + """Create DHCP host maps for the given mappings. 
+ + :param mappings: A list of dicts containing ``ip_address`` and + ``mac_address`` keys. + :param shared_key: The key used to access the DHCP server via OMAPI. + """ + # See bug 1039362 regarding server_address. + omshell = Omshell(server_address='127.0.0.1', shared_key=shared_key) + for mapping in mappings: + ip_address = mapping["ip_address"] + mac_address = mapping["mac_address"] + try: + omshell.create(ip_address, mac_address) + except ExternalProcessError as e: + maaslog.error( + "Could not create host map for %s with address %s: %s", + mac_address, ip_address, unicode(e)) + raise CannotCreateHostMap("%s -> %s: %s" % ( + mac_address, ip_address, e.output_as_unicode)) + + +@synchronous +def remove_host_maps(ip_addresses, shared_key): + """Remove DHCP host maps for the given IP addresses. + + Additionally, this will ensure that any lease present for the IP + address(es) supplied is also forcefully expired. Generally, host + maps don't create leases unless the host map is inside the dynamic + range, however this is still safe to call and can be called to + guarantee that any IP address is left expired regardless of whether + it's in the dynamic range or not. + + :param ip_addresses: A list of IP addresses. + :param shared_key: The key used to access the DHCP server via OMAPI. + """ + # See bug 1039362 regarding server_address. 
+ omshell = Omshell(server_address='127.0.0.1', shared_key=shared_key) + for ip_address in ip_addresses: + try: + omshell.remove(ip_address) + omshell.nullify_lease(ip_address) + except ExternalProcessError as e: + maaslog.error( + "Could not remove host map for %s: %s", + ip_address, unicode(e)) + raise CannotRemoveHostMap("%s: %s" % ( + ip_address, e.output_as_unicode)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/exceptions.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/exceptions.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/exceptions.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/exceptions.py 2015-07-10 01:27:14.000000000 +0000 @@ -13,9 +13,134 @@ __metaclass__ = type __all__ = [ + "AuthenticationFailed", + "CannotConfigureDHCP", + "CannotCreateHostMap", + "CannotRegisterCluster", + "CannotRemoveHostMap", + "MultipleFailures", "NoConnectionsAvailable", + "NodeStateViolation", + "NoSuchCluster", + "NoSuchEventType", + "NoSuchNode", + "NoSuchOperatingSystem", + "RegistrationFailed", ] +from twisted.python.failure import Failure + class NoConnectionsAvailable(Exception): """There is no connection available.""" + + def __init__(self, message='', uuid=None): + super(NoConnectionsAvailable, self).__init__(message) + self.uuid = uuid + + +class NoSuchEventType(Exception): + """The specified event type was not found.""" + + @classmethod + def from_name(cls, name): + return cls( + "Event type with name=%s could not be found." % name + ) + + +class NoSuchNode(Exception): + """The specified node was not found.""" + + @classmethod + def from_system_id(cls, system_id): + return cls( + "Node with system_id=%s could not be found." % system_id + ) + + @classmethod + def from_mac_address(cls, mac_address): + return cls( + "Node with mac_address=%s could not be found." 
class NodeStateViolation(Exception):
    """The specified state transition cannot be performed."""


class NoSuchCluster(Exception):
    """The specified cluster was not found."""

    @classmethod
    def from_uuid(cls, uuid):
        """Create an instance whose message names the missing cluster."""
        return cls("The cluster with UUID %s could not be found." % uuid)


class NoSuchOperatingSystem(Exception):
    """The specified OS was not found."""


class CannotConfigureDHCP(Exception):
    """Failure while configuring a DHCP server."""


class CannotCreateHostMap(Exception):
    """The host map could not be created."""


class CannotRemoveHostMap(Exception):
    """The host map could not be removed."""


class MultipleFailures(Exception):
    """Represents multiple failures.

    Each argument is a :py:class:`twisted.python.failure.Failure` instance.
    A new one of these can be created when in an exception handler simply
    by instantiating a new `Failure` instance without arguments.
    """

    def __init__(self, *failures):
        for candidate in failures:
            if not isinstance(candidate, Failure):
                raise AssertionError(
                    "All failures must be instances of twisted.python."
                    "failure.Failure, not %r" % (candidate,))
        super(MultipleFailures, self).__init__(*failures)


class NodeAlreadyExists(Exception):
    """A node already exists with a given MAC address."""


class NoIPFoundForMACAddress(Exception):
    """No IP was found for a given MAC address."""


class PowerActionAlreadyInProgress(Exception):
    """A power action was requested on a node where a power action is
    already in progress.
    """


class CannotRegisterCluster(Exception):
    """The cluster could not be registered."""

    @classmethod
    def from_uuid(cls, uuid, message):
        """Create an instance whose message carries the region's reason."""
        return cls(
            "The cluster with UUID %s could not be registered:\n%s"
            % (uuid, message))


class AuthenticationFailed(Exception):
    """One or both sides of the connection failed to authenticate."""


class RegistrationFailed(Exception):
    """The region did not or was not able to register the cluster."""


def getRegionClient():
    """getRegionClient()

    Get a client with which to make RPCs to the region.

    :raises: :py:class:`~.exceptions.NoConnectionsAvailable` when there
        are no open connections to the region controller.
    """
    # TODO: retry a couple of times before giving up if the service is
    # not running or if exceptions.NoConnectionsAvailable gets raised.
    try:
        service = provisioningserver.services.getServiceNamed('rpc')
    except KeyError:
        raise exceptions.NoConnectionsAvailable(
            "Cluster services are unavailable.")
    return service.getClient()
# Currently running monitors; maps a monitor ID to a
# (DelayedCall, context) pair.
running_monitors = dict()


def start_monitors(monitors, clock=reactor):
    """RPC responder to start monitors as specified.

    :param monitors: a `StartMonitors` message: a sequence of dicts, each
        carrying an "id", a "deadline" and a "context".
    :param clock: The reactor to schedule against; parameterized so tests
        can substitute a fake clock.

    Right now the monitors only implement a timer: one delayed call is
    scheduled per monitor, and when a deadline is reached
    `MonitorExpired` is sent to the region with the monitor's ID.
    """
    for monitor in monitors:
        monitor_id = monitor["id"]
        # A monitor re-registered under the same ID supersedes the
        # one already running.
        if monitor_id in running_monitors:
            previous_call, _ = running_monitors.pop(monitor_id)
            previous_call.cancel()
        remaining = monitor["deadline"] - datetime.now(amp.utc)
        delayed_call = clock.callLater(
            remaining.total_seconds(), monitor_expired, monitor_id)
        running_monitors[monitor_id] = (delayed_call, monitor["context"])


def monitor_expired(monitor_id):
    """Handle a monitor reaching its deadline.

    Calls `MonitorExpired` in the region, passing back the context that
    was supplied when the monitor was started.
    """
    _, context = running_monitors.pop(monitor_id)
    try:
        region_client = getRegionClient()
    except NoConnectionsAvailable:
        maaslog.error(
            "Lost connection to the region, unable to fire timer with ID: %s",
            monitor_id)
        return None
    return region_client(MonitorExpired, id=monitor_id, context=context)


def cancel_monitor(monitor_id):
    """Called from the region to cancel a running timer."""
    entry = running_monitors.pop(monitor_id, None)
    if entry is not None:
        delayed_call, _ = entry
        delayed_call.cancel()
+ """ + releases_for_commissioning = set( + osystem.get_supported_commissioning_releases()) + for release in osystem.get_supported_releases(): + requires_license_key = osystem.requires_license_key(release) + can_commission = release in releases_for_commissioning + yield { + "name": release, + "title": osystem.get_release_title(release), + "requires_license_key": requires_license_key, + "can_commission": can_commission, + } + + +def gen_operating_systems(): + """Yield operating system dicts. + + Each dict adheres to the response specification of an operating + system in the ``ListOperatingSystems`` RPC call. + """ + + for _, os in sorted(OperatingSystemRegistry): + default_release = os.get_default_release() + default_commissioning_release = os.get_default_commissioning_release() + yield { + "name": os.name, + "title": os.title, + "releases": gen_operating_system_releases(os), + "default_release": default_release, + "default_commissioning_release": default_commissioning_release, + } + + +def get_os_release_title(osystem, release): + """Get the title for the operating systems release. + + :raises NoSuchOperatingSystem: If ``osystem`` is not found. + """ + try: + osystem = OperatingSystemRegistry[osystem] + except KeyError: + raise exceptions.NoSuchOperatingSystem(osystem) + else: + title = osystem.get_release_title(release) + if title is None: + return "" + return title + + +def validate_license_key(osystem, release, key): + """Validate a license key. + + :raises NoSuchOperatingSystem: If ``osystem`` is not found. + """ + try: + osystem = OperatingSystemRegistry[osystem] + except KeyError: + raise exceptions.NoSuchOperatingSystem(osystem) + else: + return osystem.validate_license_key(release, key) + + +def get_preseed_data( + osystem, preseed_type, node_system_id, node_hostname, + consumer_key, token_key, token_secret, metadata_url): + """Composes preseed data for the given node. + + :param preseed_type: The preseed type being composed. 
+ :param node: The node for which a preseed is being composed. + :param token: OAuth token for the metadata URL. + :param metadata_url: The metdata URL for the node. + :type metadata_url: :py:class:`urlparse.ParseResult` + :return: Preseed data for the given node. + :raise NotImplementedError: when the specified operating system does + not require custom preseed data. + """ + try: + osystem = OperatingSystemRegistry[osystem] + except KeyError: + raise exceptions.NoSuchOperatingSystem(osystem) + else: + return osystem.compose_preseed( + preseed_type, Node(node_system_id, node_hostname), + Token(consumer_key, token_key, token_secret), + metadata_url.geturl()) + + +def compose_curtin_network_preseed(os_name, interfaces, auto_interfaces, + ips_mapping, gateways_mapping, + disable_ipv4, nameservers, netmasks): + """Compose Curtin network preseed for a node. + + :param os_name: Identifying name of the operating system for which a + preseed should be generated. + :param interfaces: A list of interface/MAC pairs for the node. + :param auto_interfaces: A list of MAC addresses whose network interfaces + should come up automatically on node boot. + :param ips_mapping: A dict mapping MAC addresses to lists of the + corresponding network interfaces' IP addresses. + :param gateways_mapping: A dict mapping MAC addresses to lists of the + corresponding network interfaces' default gateways. + :param disable_ipv4: Should this node be installed without IPv4 networking? + :param nameservers: List of DNS servers. + :param netmasks: A dict mapping IP dadresses from `ips_mapping` to their + respective netmasks. + :return: Preseed data, as JSON. 
+ """ + try: + osystem = OperatingSystemRegistry[os_name] + except KeyError: + raise exceptions.NoSuchOperatingSystem(os_name) + else: + return osystem.compose_curtin_network_preseed( + interfaces, auto_interfaces, ips_mapping=ips_mapping, + gateways_mapping=gateways_mapping, disable_ipv4=disable_ipv4, + nameservers=nameservers, netmasks=netmasks) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/power.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/power.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/power.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/power.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,368 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""RPC helpers relating to power control.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "get_power_state", + "query_all_nodes", +] + +from datetime import timedelta +from functools import partial + +from provisioningserver.events import ( + EVENT_TYPES, + send_event_node, + ) +from provisioningserver.logger.log import get_maas_logger +from provisioningserver.power.poweraction import ( + PowerAction, + PowerActionFail, + ) +from provisioningserver.rpc import getRegionClient +from provisioningserver.rpc.exceptions import ( + NoSuchNode, + PowerActionAlreadyInProgress, + ) +from provisioningserver.rpc.region import ( + MarkNodeFailed, + UpdateNodePowerState, + ) +from provisioningserver.utils.twisted import ( + asynchronous, + deferred, + deferWithTimeout, + pause, + synchronous, + ) +from twisted.internet import reactor +from twisted.internet.defer import ( + DeferredList, + DeferredSemaphore, + inlineCallbacks, + returnValue, + ) +from twisted.internet.task import deferLater +from twisted.internet.threads import deferToThread +from twisted.python import log + +# 
# List of power_types that support querying the power state.
# change_power_state() will only retry changing the power
# state for these power types.
# This is meant to be temporary until all the power types support
# querying the power state of a node.
QUERY_POWER_TYPES = ['amt', 'ipmi', 'mscm', 'sm15k', 'ucsm', 'virsh']


# Timeout for change_power_state(). We set it to 2 minutes by default,
# but it would be lovely if this was configurable.
CHANGE_POWER_STATE_TIMEOUT = timedelta(minutes=2).total_seconds()


maaslog = get_maas_logger("power")


# We could use a Registry here, but it seems kind of like overkill.
# Maps a node's system_id to the power change ('on'/'off') currently in
# progress for it; see maybe_change_power_state().
power_action_registry = {}


@asynchronous
@inlineCallbacks
def power_change_failure(system_id, hostname, power_change, message):
    """Deal with a node failing to be powered up or down.

    Marks the node as failed in the region (via `MarkNodeFailed`) and
    emits the matching power-failure event.

    :param system_id: The node's system_id.
    :param hostname: The node's hostname, used in log messages.
    :param power_change: 'on' or 'off'.
    :param message: Error description forwarded to the region.
    """
    assert power_change in ['on', 'off'], (
        "Unknown power change: %s" % power_change)
    maaslog.error(
        "Error changing power state (%s) of node: %s (%s)",
        power_change, hostname, system_id)
    client = getRegionClient()
    yield client(
        MarkNodeFailed,
        system_id=system_id,
        error_description=message,
    )
    if power_change == 'on':
        event_type = EVENT_TYPES.NODE_POWER_ON_FAILED
    elif power_change == 'off':
        event_type = EVENT_TYPES.NODE_POWER_OFF_FAILED
    yield send_event_node(event_type, system_id, hostname, message)


@synchronous
def perform_power_change(system_id, hostname, power_type, power_change,
                         context):
    """Issue the given `power_change` command.

    If any exception is raised during the execution of the command,
    mark the node as broken and re-raise the exception.

    :param power_change: 'on', 'off' or 'query'.
    :param context: Keyword arguments passed through to the power action.
    :return: Whatever `PowerAction.execute` returns; callers rely on this
        being the power state when `power_change` is 'query'.
    """
    action = PowerAction(power_type)
    try:
        return action.execute(power_change=power_change, **context)
    except PowerActionFail as error:
        message = "Node could not be powered %s: %s" % (
            power_change, error)
        # Report the failure to the region, waiting at most 15 seconds,
        # then re-raise the original failure regardless.
        power_change_failure(
            system_id, hostname, power_change, message).wait(15)
        raise


@asynchronous
@inlineCallbacks
def power_change_success(system_id, hostname, power_change):
    """Record a successful power change: update region state, log, and
    emit the matching powered-on/off event."""
    assert power_change in ['on', 'off'], (
        "Unknown power change: %s" % power_change)
    yield power_state_update(system_id, power_change)
    maaslog.info(
        "Changed power state (%s) of node: %s (%s)",
        power_change, hostname, system_id)
    # Emit success event.
    if power_change == 'on':
        event_type = EVENT_TYPES.NODE_POWERED_ON
    elif power_change == 'off':
        event_type = EVENT_TYPES.NODE_POWERED_OFF
    yield send_event_node(event_type, system_id, hostname)


@asynchronous
@inlineCallbacks
def power_change_starting(system_id, hostname, power_change):
    """Log that a power change is starting and emit the matching
    power-on/off-starting event."""
    assert power_change in ['on', 'off'], (
        "Unknown power change: %s" % power_change)
    maaslog.info(
        "Changing power state (%s) of node: %s (%s)",
        power_change, hostname, system_id)
    # Emit starting event.
    if power_change == 'on':
        event_type = EVENT_TYPES.NODE_POWER_ON_STARTING
    elif power_change == 'off':
        event_type = EVENT_TYPES.NODE_POWER_OFF_STARTING
    yield send_event_node(event_type, system_id, hostname)


# Successive delays, in seconds, used between power retries/queries by
# change_power_state() and get_power_state().
default_waiting_policy = (1, 2, 2, 4, 6, 8, 12)
@asynchronous
@deferred  # Always return a Deferred.
def maybe_change_power_state(system_id, hostname, power_type,
                             power_change, context, clock=reactor):
    """Attempt to change the power state of a node.

    If there is no power action already in progress, register this
    action and then pass change_power_state() to the reactor to call
    later and then return.

    This function exists to guarantee that PowerActionAlreadyInProgress
    errors will be raised promptly, before any work is done to power the
    node on.

    :raises: PowerActionAlreadyInProgress if there's already a power
        action in progress for this node.
    """
    assert power_change in ('on', 'off'), (
        "Unknown power change: %s" % power_change)
    # There should be one and only one power change for each system ID.
    # If there's one already, raise an error.
    registered_power_action = power_action_registry.get(system_id, None)
    if registered_power_action is not None:
        raise PowerActionAlreadyInProgress(
            "Unable to change power state to '%s' for node %s: another "
            "action is already in progress for that node." %
            (power_change, hostname))
    power_action_registry[system_id] = power_change

    def clean_up(*args, **kwargs):
        # Forget the in-progress action; accepts (and ignores) whatever
        # result or failure the Deferred hands it.
        power_action_registry.pop(system_id, None)

    # Arrange for the power change to happen later; do not make the caller
    # wait, because it might take a long time.
    # We set a timeout of two minutes so that if the power action
    # doesn't return in a timely fashion (or fails silently or
    # some such) it doesn't block other actions on the node.
    d = deferLater(
        clock, 0, deferWithTimeout, CHANGE_POWER_STATE_TIMEOUT,
        change_power_state, system_id, hostname, power_type,
        power_change, context, clock)
    d.addErrback(log.err)
    # Whether we succeeded or failed, we need to remove the action
    # from the registry of actions, otherwise every subsequent
    # action will fail.
    d.addBoth(clean_up)
    # `d` is deliberately not returned: the caller gets back promptly
    # (see docstring) and errors are reported via log.err above.
@asynchronous
@inlineCallbacks
def change_power_state(system_id, hostname, power_type, power_change, context,
                       clock=reactor):
    """Change the power state of a node.

    Monitor the result of the power change action by querying the
    power state of the node and mark the node as failed if it doesn't
    work.

    :param power_change: 'on' or 'off'.
    :param context: Power-driver parameters passed to the power action.
    :param clock: Reactor used for pauses; parameterized for testing.
    """
    yield power_change_starting(system_id, hostname, power_change)
    # Use increasing waiting times to work around race conditions
    # that could arise when power-cycling the node.
    for waiting_time in default_waiting_policy:
        # Issue the power command. A PowerActionFail propagates to the
        # caller; perform_power_change() has already reported the failure
        # to the region by then. (The original wrapped this in a
        # `try/except PowerActionFail: raise`, which was a no-op.)
        yield deferToThread(
            perform_power_change, system_id, hostname, power_type,
            power_change, context)
        # If the power_type doesn't support querying the power state:
        # exit now.
        if power_type not in QUERY_POWER_TYPES:
            return
        # Wait to give the node some time to change its power state.
        yield pause(waiting_time, clock)
        # Check the current power state.
        new_power_state = yield deferToThread(
            perform_power_change, system_id, hostname, power_type,
            'query', context)
        if new_power_state == "unknown" or new_power_state == power_change:
            yield power_change_success(system_id, hostname, power_change)
            return

    # Failure: the power state of the node hasn't changed: mark it as
    # broken.
    message = "Timeout after %s tries" % len(default_waiting_policy)
    yield power_change_failure(system_id, hostname, power_change, message)


@asynchronous
@inlineCallbacks
def power_state_update(system_id, state):
    """Report a node's current power state to the region.

    :param state: One of 'on', 'off', 'unknown' or 'error'.
    """
    client = getRegionClient()
    yield client(
        UpdateNodePowerState,
        system_id=system_id,
        power_state=state,
    )


@asynchronous
@inlineCallbacks
def power_query_failure(system_id, hostname, message):
    """Deal with a node failing to be queried.

    Records an 'error' power state in the region and emits a
    power-query-failed event.
    """
    maaslog.error(message)
    yield power_state_update(system_id, 'error')
    yield send_event_node(
        EVENT_TYPES.NODE_POWER_QUERY_FAILED,
        system_id, hostname, message)


@synchronous
def perform_power_query(system_id, hostname, power_type, context):
    """Issue the given `power_query` command.

    No exception handling is performed here, this allows
    `get_power_state` to perform multiple queries and only
    log the final error.

    :return: The queried power state.
    """
    action = PowerAction(power_type)
    return action.execute(power_change='query', **context)


@asynchronous
@inlineCallbacks
def get_power_state(system_id, hostname, power_type, context, clock=reactor):
    """Return the power state ('on', 'off' or 'unknown') of the given node.

    A side-effect of calling this method is that the power state recorded
    in the database is updated; on persistent failure the node is recorded
    as being in the 'error' power state.

    :raises PowerActionFail: if `power_type` is not queryable, or if the
        query still fails after all retries.
    """
    if power_type not in QUERY_POWER_TYPES:
        # query_all_nodes() won't call this with an un-queryable power
        # type, however this is left here to prevent PEBKAC.
        raise PowerActionFail("Unknown power_type '%s'" % power_type)

    # Use increasing waiting times to work around race conditions that could
    # arise when power querying the node.
    for waiting_time in default_waiting_policy:
        error = None
        # Perform power query.
        try:
            power_state = yield deferToThread(
                perform_power_query, system_id, hostname, power_type, context)
            if power_state not in ("on", "off", "unknown"):
                # This is considered an error.
                raise PowerActionFail(power_state)
        except PowerActionFail as e:
            # Hold the error so if failure after retries, we can
            # log the reason.
            error = e

            # Wait before trying again.
            # NOTE(review): this also sleeps after the *final* attempt,
            # delaying the failure report by the last waiting period.
            yield pause(waiting_time, clock)
            continue
        yield power_state_update(system_id, power_state)
        returnValue(power_state)

    # The query failed after all retries: record the node as broken.
    message = "Node could not be queried %s (%s) %s" % (
        system_id, hostname, error)
    yield power_query_failure(system_id, hostname, message)
    raise PowerActionFail(error)
def maaslog_query_failure(node, failure):
    """Log a failed power query for `node`, choosing the message by the
    failure's type."""
    hostname = node['hostname']
    if failure.check(PowerActionFail):
        maaslog.error(
            "%s: Failed to query power state: %s.",
            hostname, failure.getErrorMessage())
    elif failure.check(NoSuchNode):
        maaslog.debug(
            "%s: Could not update power status; "
            "no such node.", hostname)
    else:
        maaslog.error(
            "%s: Failed to query power state, unknown error: %s",
            hostname, failure.getErrorMessage())


def maaslog_query(node, power_state):
    """Log a power-state transition for `node`, if any, and pass the
    queried state through unchanged."""
    previous_state = node['power_state']
    if previous_state != power_state:
        maaslog.info(
            "%s: Power state has changed from %s to %s.", node['hostname'],
            previous_state, power_state)
    return power_state


def _query_node(node, clock):
    """Performs `power_query` on the given node, logging state changes
    and failures."""
    query = get_power_state(
        node['system_id'], node['hostname'],
        node['power_type'], node['context'],
        clock=clock)
    query.addCallbacks(
        partial(maaslog_query, node),
        partial(maaslog_query_failure, node))
    return query
def query_all_nodes(nodes, max_concurrency=5, clock=reactor):
    """Performs `power_query` on all nodes. If a node's state has changed,
    then that is sent back to the region.

    Queries run at most `max_concurrency` at a time; nodes whose power
    type cannot be queried are skipped.
    """
    semaphore = DeferredSemaphore(tokens=max_concurrency)
    queries = (
        semaphore.run(_query_node, node, clock)
        for node in nodes
        if node['power_type'] in QUERY_POWER_TYPES)
    return DeferredList(queries)
class Register(amp.Command):
    """Register a cluster with the region controller.

    This is the last part of the Authenticate and Register two-step. See
    cluster-bootstrap_ for an explanation.

    :since: 1.7
    """

    # Argument names are byte strings, as AMP requires.
    arguments = [
        (b"uuid", amp.Unicode()),
        (b"networks", amp.AmpList([
            (b"interface", amp.Unicode()),
            (b"ip", amp.Unicode()),
            (b"subnet_mask", amp.Unicode()),
            ], optional=True)),
        # The URL for the region as seen by the cluster.
        (b"url", ParsedURL(optional=True)),
    ]
    response = []
    errors = {
        CannotRegisterCluster: b"CannotRegisterCluster",
    }


class GetBootSources(amp.Command):
    """Report boot sources and selections for the given cluster.

    :since: 1.6
    :deprecated: 1.7
    """

    arguments = [
        # The cluster UUID.
        (b"uuid", amp.Unicode()),
    ]
    # Each source carries its keyring data plus the image selections
    # (release/arches/subarches/labels) made against it.
    response = [
        (b"sources", amp.AmpList(
            [(b"url", amp.Unicode()),
             (b"keyring_data", Bytes()),
             (b"selections", amp.AmpList(
                [(b"release", amp.Unicode()),
                 (b"arches", amp.ListOf(amp.Unicode())),
                 (b"subarches", amp.ListOf(amp.Unicode())),
                 (b"labels", amp.ListOf(amp.Unicode()))]))])),
    ]
    errors = []


class GetBootSourcesV2(amp.Command):
    """Report boot sources and selections for the given cluster.

    Includes the new os field for the selections.

    :since: 1.7
    """

    arguments = [
        # The cluster UUID.
        (b"uuid", amp.Unicode()),
    ]
    response = [
        (b"sources", amp.AmpList(
            [(b"url", amp.Unicode()),
             (b"keyring_data", Bytes()),
             (b"selections", amp.AmpList(
                [(b"os", amp.Unicode()),
                 (b"release", amp.Unicode()),
                 (b"arches", amp.ListOf(amp.Unicode())),
                 (b"subarches", amp.ListOf(amp.Unicode())),
                 (b"labels", amp.ListOf(amp.Unicode()))]))])),
    ]
    errors = []
+ (b"uuid", amp.Unicode()), + (b"mappings", CompressedAmpList( + [(b"ip", amp.Unicode()), + (b"mac", amp.Unicode())])) + ] + response = [] + errors = { + NoSuchCluster: b"NoSuchCluster", + } + + +class GetArchiveMirrors(amp.Command): + """Return the Main and Port mirrors to use. + + :since: 1.7 + """ + arguments = [] + response = [ + (b"main", ParsedURL()), + (b"ports", ParsedURL()), + ] + errors = [] + + +class GetProxies(amp.Command): + """Return the HTTP and HTTPS proxies to use. + + :since: 1.6 + """ + + arguments = [] + response = [ + (b"http", ParsedURL(optional=True)), + (b"https", ParsedURL(optional=True)), + ] + errors = [] + + +class GetClusterStatus(amp.Command): + """Return the status of the given cluster. + + :since: 1.7 + """ + + arguments = [ + # The cluster UUID. + (b"uuid", amp.Unicode()), + ] + _response_status_choices = { + 0: b"PENDING", # NODEGROUP_STATUS.PENDING + 1: b"ACCEPTED", # NODEGROUP_STATUS.ACCEPTED + 2: b"REJECTED", # NODEGROUP_STATUS.REJECTED + } + response = [ + (b"status", Choice(_response_status_choices)), + ] + errors = { + NoSuchCluster: b"NoSuchCluster", + } + + +class MarkNodeFailed(amp.Command): + """Mark a node as 'broken'. + + :since: 1.7 + """ + + arguments = [ + # The node's system_id. + (b"system_id", amp.Unicode()), + # The error description. + (b"error_description", amp.Unicode()), + ] + response = [] + errors = { + NodeStateViolation: b"NodeStateViolation", + NoSuchNode: b"NoSuchNode", + } + + +class ListNodePowerParameters(amp.Command): + """Return the list of power parameters for nodes + that this cluster controls. + + Used to query all of the nodes that the cluster + composes. + + :since: 1.7 + """ + + arguments = [ + # The cluster UUID. 
+ (b"uuid", amp.Unicode()), + ] + response = [ + (b"nodes", amp.AmpList( + [(b"system_id", amp.Unicode()), + (b"hostname", amp.Unicode()), + (b"power_state", amp.Unicode()), + (b"power_type", amp.Unicode()), + # We can't define a tighter schema here because this is a highly + # variable bag of arguments from a variety of sources. + (b"context", StructureAsJSON())])), + ] + errors = { + NoSuchCluster: b"NoSuchCluster", + } + + +class UpdateNodePowerState(amp.Command): + """Update Node Power State. + + :since: 1.7 + """ + + arguments = [ + # The node's system_id. + (b"system_id", amp.Unicode()), + # The node's power_state. + (b"power_state", amp.Unicode()), + ] + response = [] + errors = {NoSuchNode: b"NoSuchNode"} + + +class RegisterEventType(amp.Command): + """Register an event type. + + :since: 1.7 + """ + + arguments = [ + (b"name", amp.Unicode()), + (b"description", amp.Unicode()), + (b"level", amp.Integer()), + ] + response = [] + errors = [] + + +class SendEvent(amp.Command): + """Send an event. + + :since: 1.7 + """ + + arguments = [ + (b"system_id", amp.Unicode()), + (b"type_name", amp.Unicode()), + (b"description", amp.Unicode()), + ] + response = [] + errors = { + NoSuchNode: b"NoSuchNode", + NoSuchEventType: b"NoSuchEventType" + } + + +class SendEventMACAddress(amp.Command): + """Send an event. + + :since: 1.7 + """ + + arguments = [ + (b"mac_address", amp.Unicode()), + (b"type_name", amp.Unicode()), + (b"description", amp.Unicode()), + ] + response = [] + errors = { + NoSuchNode: b"NoSuchNode", + NoSuchEventType: b"NoSuchEventType" + } + + +class ReportForeignDHCPServer(amp.Command): + """Report a foreign DHCP server on the cluster's network. + + :since: 1.7 + """ + + arguments = [ + (b"cluster_uuid", amp.Unicode()), + (b"interface_name", amp.Unicode()), + (b"foreign_dhcp_ip", amp.Unicode(optional=True)), + ] + response = [] + errors = [] + + +class GetClusterInterfaces(amp.Command): + """Fetch the known interfaces for a cluster from the region. 
class GetClusterInterfaces(amp.Command):
    """Fetch the known interfaces for a cluster from the region.

    :since: 1.7
    """

    arguments = [
        (b"cluster_uuid", amp.Unicode()),
    ]
    response = [
        (b"interfaces", amp.AmpList(
            [(b"name", amp.Unicode()),
             (b"interface", amp.Unicode()),
             (b"ip", amp.Unicode())]))
    ]
    errors = []


class CreateNode(amp.Command):
    """Create a node on a given cluster.

    :since: 1.7
    """

    arguments = [
        (b'cluster_uuid', amp.Unicode()),
        (b'architecture', amp.Unicode()),
        (b'power_type', amp.Unicode()),
        # Power parameters are passed as a JSON-encoded string.
        (b'power_parameters', amp.Unicode()),
        (b'mac_addresses', amp.ListOf(amp.Unicode())),
    ]
    response = [
        (b'system_id', amp.Unicode()),
    ]
    errors = {
        NodeAlreadyExists: b"NodeAlreadyExists",
    }


class MonitorExpired(amp.Command):
    """Called by a cluster when a running monitor hits its deadline.

    The original context parameter from the StartMonitors call is returned.

    :since: 1.7
    """

    arguments = [
        (b"id", amp.Unicode()),
        (b"context", StructureAsJSON()),
    ]
    response = []
    errors = []


class ReloadCluster(amp.Command):
    """Called by a cluster when it wants to reload its state.

    The region may respond with many different calls to the cluster
    that will give it all the information it needs to restore state (for
    example when it got restarted).

    For example, the region will restore all the timers in the cluster,
    so none or many StartMonitors calls may be received after the cluster
    issues this command.

    :since: 1.7
    """

    arguments = [
        (b"cluster_uuid", amp.Unicode()),
    ]
    response = []
    errors = []
class RequestNodeInfoByMACAddress(amp.Command):
    """Request Node information by mac address.

    :since: 1.7
    """

    arguments = [
        (b"mac_address", amp.Unicode()),
    ]
    response = [
        (b"system_id", amp.Unicode()),
        (b"hostname", amp.Unicode()),
        (b"status", amp.Integer()),
        (b"boot_type", amp.Unicode()),
        (b"osystem", amp.Unicode()),
        (b"distro_series", amp.Unicode()),
        (b"architecture", amp.Unicode()),
        (b"purpose", amp.Unicode()),
    ]
    errors = {
        NoSuchNode: b"NoSuchNode",
    }


@synchronous
def evaluate_tag(tag_name, tag_definition, tag_nsmap, credentials):
    """Evaluate `tag_definition` against this cluster's nodes' details.

    Delegates to `process_node_tags`, supplying an OAuth-authenticated
    client for the MAAS region API and this cluster's UUID.

    :param tag_name: The name of the tag, used for logging.
    :param tag_definition: The XPath expression of the tag.
    :param tag_nsmap: The namespace map as used by LXML's ETree library.
    :param credentials: A 3-tuple of OAuth credentials.
    """
    client = MAASClient(
        auth=MAASOAuth(*credentials), dispatcher=MAASDispatcher(),
        base_url=get_maas_url())
    process_node_tags(
        tag_name=tag_name, tag_definition=tag_definition, tag_nsmap=tag_nsmap,
        client=client, nodegroup_uuid=get_cluster_uuid())
+ """ + super(StubOS, self).__init__() + self.name = name + self.title = name.capitalize() + self.releases = releases + + def is_release_supported(self, release): + return release in self.releases + + def get_supported_releases(self): + return [name for name, _ in self.releases] + + def get_default_release(self): + if len(self.releases) == 0: + return None + else: + name, _ = self.releases[0] + return name + + def get_release_title(self, release): + for name, title in self.releases: + if name == release: + return title + else: + return None + + def format_release_choices(self): + raise NotImplementedError() + + def get_boot_image_purposes(self, arch, subarch, release, label): + raise NotImplementedError() + + def requires_license_key(self, release): + for index, (name, _) in enumerate(self.releases): + if name == release: + return index % 2 == 1 + else: + return False + + def get_default_commissioning_release(self): + if len(self.releases) >= 2: + name, _ = self.releases[1] + return name + else: + return None + + def get_supported_commissioning_releases(self): + return [name for name, _ in self.releases[1:3]] diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/testing/__init__.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/testing/__init__.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/testing/__init__.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/testing/__init__.py 2015-07-10 01:27:14.000000000 +0000 @@ -13,23 +13,80 @@ __metaclass__ = type __all__ = [ + "always_fail_with", + "always_succeed_with", "are_valid_tls_parameters", "call_responder", + "make_amp_protocol_factory", + "MockClusterToRegionRPCFixture", + "MockLiveClusterToRegionRPCFixture", "TwistedLoggerFixture", ] +from abc import ( + ABCMeta, + abstractmethod, + ) import collections +from copy import copy +import itertools import operator +from os import path -from fixtures import Fixture +import fixtures +from fixtures import ( + 
EnvironmentVariable, + Fixture, + ) +from maastesting.factory import factory +from maastesting.fixtures import TempDirectory +from mock import ( + Mock, + sentinel, + ) +import provisioningserver +from provisioningserver.rpc import region +from provisioningserver.rpc.clusterservice import ( + Cluster, + ClusterClient, + ClusterClientService, + ) +from provisioningserver.rpc.common import RPCProtocol +from provisioningserver.rpc.testing.tls import get_tls_parameters_for_region +from provisioningserver.security import ( + get_shared_secret_from_filesystem, + set_shared_secret_on_filesystem, + ) +from provisioningserver.utils.twisted import ( + asynchronous, + callOut, + ) +from testtools.deferredruntest import extract_result from testtools.matchers import ( AllMatch, IsInstance, MatchesAll, MatchesDict, ) -from twisted.internet import ssl -from twisted.python import log +from twisted.internet import ( + defer, + endpoints, + reactor, + ssl, + ) +from twisted.internet.defer import ( + inlineCallbacks, + returnValue, + ) +from twisted.internet.protocol import Factory +from twisted.internet.task import Clock +from twisted.protocols import amp +from twisted.python import ( + log, + reflect, + ) +from twisted.python.failure import Failure +from twisted.test import iosim def call_responder(protocol, command, arguments): @@ -41,6 +98,24 @@ arguments = command.makeArguments(arguments, protocol) d = responder(arguments) d.addCallback(command.parseResponse, protocol) + + def eb_massage_error(error): + if error.check(amp.RemoteAmpError): + # Convert remote errors back into local errors using the + # command's error map if possible. + error_type = command.reverseErrors.get( + error.value.errorCode, amp.UnknownRemoteError) + return Failure(error_type(error.value.description)) + else: + # Exceptions raised in responders that aren't declared in that + # responder's schema can get through to here without being wrapped + # in RemoteAmpError. 
This is because call_responder() bypasses the + # network marshall/unmarshall steps, where these exceptions would + # ordinarily get squashed. + return Failure(amp.UnknownRemoteError("%s: %s" % ( + reflect.qual(error.type), reflect.safe_str(error.value)))) + d.addErrback(eb_massage_error) + return d @@ -59,11 +134,15 @@ return "\n---\n".join( log.textFromEventDict(event) for event in self.logs) + # For compatibility with fixtures.FakeLogger. + output = property(dump) + + def containsError(self): + return any(log["isError"] for log in self.logs) + def setUp(self): super(TwistedLoggerFixture, self).setUp() self.addCleanup( - operator.setitem, self.logs, slice(None), []) - self.addCleanup( operator.setitem, log.theLogPublisher.observers, slice(None), log.theLogPublisher.observers[:]) log.theLogPublisher.observers[:] = [self.logs.append] @@ -76,3 +155,395 @@ AllMatch(IsInstance(ssl.Certificate)), ), }) + + +class MockClusterToRegionRPCFixtureBase(fixtures.Fixture): + """Patch in a stub region RPC implementation to enable end-to-end testing. + + This is an abstract base class. Derive concrete fixtures from this by + implementing the `connect` method. + """ + + __metaclass__ = ABCMeta + + starting = None + stopping = None + + def checkServicesClean(self): + # If services are running, what do we do with any existing RPC + # service? Do we shut it down and patch in? Do we just patch in and + # move the running service aside? If it's not running, do we patch + # into it without moving it aside? For now, keep it simple and avoid + # these questions by requiring that services are stopped and that no + # RPC service is globally registered. 
+ if provisioningserver.services.running: + raise AssertionError( + "Please ensure that cluster services are *not* running " + "before using this fixture.") + if "rpc" in provisioningserver.services.namedServices: + raise AssertionError( + "Please ensure that no RPC service is registered globally " + "before using this fixture.") + + def asyncStart(self): + # Check that no cluster services are running and that there's no RPC + # service already registered. + self.checkServicesClean() + # Patch it into the global services object. + self.rpc_service.setName("rpc") + self.rpc_service.setServiceParent(provisioningserver.services) + # Pretend event-loops only exist for those connections that already + # exist. The chicken-and-egg will be resolved by injecting a + # connection later on. + self.rpc_service._get_rpc_info_url = self._get_rpc_info_url + self.rpc_service._fetch_rpc_info = self._fetch_rpc_info + # Finally, start the service. If the clock is advanced, this will do + # its usual update() calls, but we've patched out _get_rpc_info_url + # and _fetch_rpc_info so no traffic will result. + self.starting = defer.maybeDeferred(self.rpc_service.startService) + + def asyncStop(self): + if self.starting is None: + # Nothing to do; it never started. + self.stopping = defer.succeed(None) + else: + self.starting.cancel() + self.stopping = defer.maybeDeferred( + self.rpc_service.disownServiceParent) + # Ensure the cluster's services will be left in a consistent state. + self.stopping.addCallback(callOut(self.checkServicesClean)) + + @asynchronous(timeout=15) + def setUp(self): + super(MockClusterToRegionRPCFixtureBase, self).setUp() + # Ensure that we have MAAS_URL and CLUSTER_UUID set. + self.useFixture(EnvironmentVariable( + "MAAS_URL", factory.make_simple_http_url())) + self.useFixture(EnvironmentVariable( + "CLUSTER_UUID", factory.make_UUID().encode("ascii"))) + # Use an inert clock with ClusterClientService so it doesn't update + # itself except when we ask it to. 
+ self.rpc_service = ClusterClientService(Clock()) + # Start up, but schedule stop first. + self.addCleanup(self.asyncStop) + self.asyncStart() + # Return the Deferred so that callers from threads outside of the + # reactor will block. In the reactor thread, a supporting test + # framework may know how to handle this sanely. + return self.starting + + @asynchronous(timeout=15) + def cleanUp(self): + super(MockClusterToRegionRPCFixtureBase, self).cleanUp() + # Return the Deferred so that callers from threads outside of the + # reactor will block. In the reactor thread, a supporting test + # framework may know how to handle this sanely. + return self.stopping + + def getEventLoopName(self, protocol): + """Return `protocol`'s event-loop name. + + If one has not been set already, one is generated and saved as + `protocol.ident`. + """ + try: + return protocol.ident + except AttributeError: + protocol.ident = factory.make_name("eventloop") + return protocol.ident + + def ensureSharedSecret(self): + """Make sure the shared-secret is set.""" + if get_shared_secret_from_filesystem() is None: + set_shared_secret_on_filesystem(factory.make_bytes()) + + @asynchronous(timeout=5) + def addEventLoop(self, protocol): + """Add a new stub event-loop using the given `protocol`. + + The `protocol` should be an instance of `amp.AMP`. + + :return: py:class:`twisted.test.iosim.IOPump` + """ + eventloop = self.getEventLoopName(protocol) + address = factory.make_ipv4_address(), factory.pick_port() + client = ClusterClient(address, eventloop, self.rpc_service) + return self.connect(client, protocol) + + def makeEventLoop(self, *commands): + """Make and add a new stub event-loop for the given `commands`. + + See `make_amp_protocol_factory` for details. 
+ """ + if region.Identify not in commands: + commands = commands + (region.Identify,) + if region.Authenticate not in commands: + commands = commands + (region.Authenticate,) + if region.Register not in commands: + commands = commands + (region.Register,) + if amp.StartTLS not in commands: + commands = commands + (amp.StartTLS,) + protocol_factory = make_amp_protocol_factory(*commands) + protocol = protocol_factory() + eventloop = self.getEventLoopName(protocol) + protocol.Identify.return_value = {"ident": eventloop} + protocol.Authenticate.side_effect = self._authenticate_with_cluster_key + protocol.Register.side_effect = always_succeed_with({}) + protocol.StartTLS.return_value = get_tls_parameters_for_region() + return protocol, self.addEventLoop(protocol) + + @abstractmethod + def connect(self, cluster, region): + """Wire up a connection between cluster and region. + + :type cluster: `twisted.internet.interfaces.IProtocol` + :type region: `twisted.internet.interfaces.IProtocol` + :return: ... + """ + + def _get_rpc_info_url(self): + """Patch-in for `ClusterClientService._get_rpc_info_url`. + + Returns a dummy value. + """ + return sentinel.url + + def _fetch_rpc_info(self, url): + """Patch-in for `ClusterClientService._fetch_rpc_info`. + + Describes event-loops only for those event-loops already known to the + service, thus new connections must be injected into the service. + """ + connections = self.rpc_service.connections.viewitems() + return { + "eventloops": { + eventloop: [client.address] + for eventloop, client in connections + }, + } + + def _authenticate_with_cluster_key(self, protocol, message): + """Patch-in for `Authenticate` calls. + + This ought to always return the correct digest because it'll be using + the same shared-secret as the cluster. 
+ """ + self.ensureSharedSecret() + return Cluster().authenticate(message) + + +class MockClusterToRegionRPCFixture(MockClusterToRegionRPCFixtureBase): + """Patch in a stub region RPC implementation to enable end-to-end testing. + + Use this in *cluster* tests when you're not running with a reactor, or + when you need fine-grained control over IO. This has low overhead and is + useful for writing tests where there are obvious points where you can pump + IO "by hand". + + Example usage (assuming `inlineCallbacks`):: + + fixture = self.useFixture(MockClusterToRegionRPCFixture()) + yield fixture.starting # Wait for the fixture to start. + + protocol, io = fixture.makeEventLoop(region.Identify) + protocol.Identify.return_value = defer.succeed({"ident": "foobar"}) + + client = getRegionClient() + result = client(region.Identify) + io.flush() # Call this in the reactor thread. + + self.assertThat(result, ...) + + """ + + def connect(self, cluster, region): + """Wire up a connection between cluster and region. + + :type cluster: `twisted.internet.interfaces.IProtocol` + :type region: `twisted.internet.interfaces.IProtocol` + :return: py:class:`twisted.test.iosim.IOPump` + """ + return iosim.connect( + region, iosim.makeFakeServer(region), + cluster, iosim.makeFakeClient(cluster), + debug=False, # Debugging is useful, but too noisy by default. + ) + + +class MockLiveClusterToRegionRPCFixture(MockClusterToRegionRPCFixtureBase): + """Patch in a stub region RPC implementation to enable end-to-end testing. + + This differs from `MockClusterToRegionRPCFixture` in that the connections + between the region and the cluster are _live_, by which I mean that + they're connected by reactor-managed IO, rather than by an `IOPump`. This + means that the reactor must be running in order to use this fixture. + + Use this in *cluster* tests where the reactor is running, for example when + using `MAASTwistedRunTest` or its siblings. 
There's a slightly greater + overhead than when using `MockClusterToRegionRPCFixture`, but it's not + huge. You must be careful to follow the usage instructions otherwise + you'll be plagued by dirty reactor errors. + + Example usage (assuming `inlineCallbacks`):: + + fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) + protocol, connecting = fixture.makeEventLoop(region.Identify) + protocol.Identify.return_value = defer.succeed({"ident": "foobar"}) + + # This allows the connections to get established via IO through the + # reactor. The result of `connecting` is a callable that arranges for + # the correct shutdown of the connections being established. + self.addCleanup((yield connecting)) + + client = getRegionClient() + result = yield client(region.Identify) + self.assertThat(result, ...) + + """ + + def setUp(self): + self.sockdir = TempDirectory() # Place for UNIX sockets. + self.socknames = itertools.imap(unicode, itertools.count(1)) + return super(MockLiveClusterToRegionRPCFixture, self).setUp() + + def asyncStart(self): + super(MockLiveClusterToRegionRPCFixture, self).asyncStart() + + def started(result): + self.sockdir.setUp() + return result + + self.starting.addCallback(started) + + def asyncStop(self): + super(MockLiveClusterToRegionRPCFixture, self).asyncStop() + + def stopped(result): + self.sockdir.cleanUp() + return result + + self.stopping.addCallback(stopped) + + @inlineCallbacks + def connect(self, cluster, region): + """Wire up a connection between cluster and region. + + Uses a UNIX socket to very rapidly connect the two ends. + + :type cluster: `twisted.internet.interfaces.IProtocol` + :type region: `twisted.internet.interfaces.IProtocol` + """ + # Wire up the region and cluster protocols via the sockfile. 
+ sockfile = path.join(self.sockdir.path, next(self.socknames)) + + class RegionFactory(Factory): + def buildProtocol(self, addr): + return region + + endpoint_region = endpoints.UNIXServerEndpoint(reactor, sockfile) + port = yield endpoint_region.listen(RegionFactory()) + + endpoint_cluster = endpoints.UNIXClientEndpoint(reactor, sockfile) + client = yield endpoints.connectProtocol(endpoint_cluster, cluster) + + # Wait for the client to be fully connected. Because onReady will have + # been capped-off by now (see ClusterClient.connectionMade) this will + # not raise any exceptions. In some ways this is convenient because it + # allows the resulting issues to be encountered within test code. + yield client.ready.get() + + @inlineCallbacks + def shutdown(): + # We need to make sure that everything is shutdown correctly. TLS + # seems to make this even more important: it complains loudly if + # connections are not closed cleanly. An interesting article to + # read now is Jono Lange's "How to Disconnect in Twisted, Really" + # . + yield port.loseConnection() + yield port.deferred + if region.transport is not None: + yield region.transport.loseConnection() + yield region.onConnectionLost + if client.transport is not None: + yield client.transport.loseConnection() + yield client.onConnectionLost + + # Fixtures don't wait for deferred work in clean-up tasks (or anywhere + # else), so we can't use `self.addCleanup(shutdown)` here. We need to + # get the user to add `shutdown` to the clean-up tasks for the *test*, + # on the assumption they're using a test framework that accommodates + # deferred work (like testtools with `MAASTwistedRunTest`). + returnValue(shutdown) + + +# An iterable of names for new dynamically-created AMP protocol factories. 
+amp_protocol_factory_names = ( + "AMPTestProtocol#%d".encode("ascii") % seq + for seq in itertools.count(1)) + + +def make_amp_protocol_factory(*commands): + """Make a new protocol factory based on `RPCProtocol`.""" + + def __init__(self): + super(cls, self).__init__() + self._commandDispatch = self._commandDispatch.copy() + for command in commands: + # Get a class-level responder, if set. + responder = getattr(self, command.commandName, None) + if responder is None: + # There's no class-level responder, so create an + # instance-level responder using a Mock. + responder = Mock(name=command.commandName) + setattr(self, command.commandName, responder) + # Register whichever responder we've found. + self._commandDispatch[command.commandName] = (command, responder) + + name = next(amp_protocol_factory_names) + cls = type(name, (RPCProtocol,), {"__init__": __init__}) + + return cls + + +def always_succeed_with(result): + """Return a callable that always returns a successful Deferred. + + The callable allows (and ignores) all arguments, and returns a shallow + `copy` of `result`. + """ + def always_succeed(*args, **kwargs): + return defer.succeed(copy(result)) + return always_succeed + + +def always_fail_with(result): + """Return a callable that always returns a failed Deferred. + + The callable allows (and ignores) all arguments, and returns a shallow + `copy` of `result`. + """ + def always_fail(*args, **kwargs): + return defer.fail(copy(result)) + return always_fail + + +def capture_result(d): + """Capture a result from a `Deferred` mid-flight. + + Rather than at the end of a callback chain. + + :type d: :py:class:`defer.Deferred`. + :return: A no-argument callable that will extract the current result from + the given `Deferred`, or raise an exception if it has not yet fired. + See py:func:`extract_result`. + """ + # We don't need to use a Deferred here, but it's convenient because it + # pairs well with extract_result(). 
+ dest = defer.Deferred() + + def capture(result): + dest.callback(result) + return result + d.addBoth(capture) + + return lambda: extract_result(dest) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/testing/region.crt maas-1.7.6+bzr3376/src/provisioningserver/rpc/testing/region.crt --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/testing/region.crt 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/testing/region.crt 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,45 @@ +-----BEGIN CERTIFICATE----- +MIICtDCCAZwCAQEwDQYJKoZIhvcNAQEEBQAwIDEeMBwGA1UEAxQVTUFBUyBSZWdp +b24gKlRFU1RJTkcqMB4XDTE0MDIyNjE3NDEyMFoXDTE1MDIyNjE3NDEyMFowIDEe +MBwGA1UEAxQVTUFBUyBSZWdpb24gKlRFU1RJTkcqMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA1q+LGY7CWiHjOGTBMvvhSK7/ke/fX0TXwxb8nO/1tscO +iaWjVfIgLVnPMDZOF38BoYjjcGIjNH2/W4kypqpuqoyST7fjN0plin7zmNZwFGKy +BRBKbbPi+jTYy3FXm81pHmJLnJwbfUlKFO9M/sZUDu3QFYrMyo1m8p3/wjFD3+vy +S3HXdbw7FuT7tSoIY7SPYAYjMuH0goT914d2O4ZblO1DDRsnvixyAIqF+gX0nF5/ +rOlv/QU1rEE9uCUxYhSoB97qoUeETd2axeQa8NSUCw49cAjRlXAz0+y426c8II6O +5xuX42JXYA1tFfiBdSByWOYs+fPUNOLlk+oUN/MoGwIDAQABMA0GCSqGSIb3DQEB +BAUAA4IBAQBA3N9gZWIymXqlBMEmN4rV7CWrgfeG6kioDtE2F5HMLVEQ9BuFTLz4 +QK+G+N52lUjkuS2GUvA+7nlzGyoVjAdLqj5gPgAPe4kWheueyRsaxrH5rU+0KB1K +o3RWS9pSPbugkFU27jM4vumOz2ua1+xxZFsC+mPsYccf1LnhZhp/iGLnueQ1CkSJ +bQuopk66GlookZqvN+wUo5bjh3/8NccNnmtuuR5rv/Xy1k/+Vk2lIDF8dE65MR1V +4arGvtmK1i8iHIKIJ0nmk0Y6SppN+3KjeB4iPpQKBdc/s8EiL3WZqVARx47lRGxD +Bj9FjbyUAlkl9PQHoZW/2lPpTQXTqFj4 +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDWr4sZjsJaIeM4 +ZMEy++FIrv+R799fRNfDFvyc7/W2xw6JpaNV8iAtWc8wNk4XfwGhiONwYiM0fb9b +iTKmqm6qjJJPt+M3SmWKfvOY1nAUYrIFEEpts+L6NNjLcVebzWkeYkucnBt9SUoU +70z+xlQO7dAViszKjWbynf/CMUPf6/JLcdd1vDsW5Pu1KghjtI9gBiMy4fSChP3X +h3Y7hluU7UMNGye+LHIAioX6BfScXn+s6W/9BTWsQT24JTFiFKgH3uqhR4RN3ZrF +5Brw1JQLDj1wCNGVcDPT7Ljbpzwgjo7nG5fjYldgDW0V+IF1IHJY5iz589Q04uWT 
+6hQ38ygbAgMBAAECggEAW1ARBw7chX+yaJMaRbgyqzqbw5PWW2wppXYKfinmRhbG +jS9hmLXCj+eKI4SFlKLVq8JQksV7GeF+Wc2yOId3SJ0/wh6By0uegtjafaB/zXvp +IhQ6xAxmN2vw5h9QVxl4Y48FgBg57QEWPG9IPXlX2X23KuJ9lo2sQveHCC7yIrQu +Ns/ywgtrEBwcK9nSsdNR8OWlwtMRzg8ZuBCBIADiWdw0H3FRfjcyI9+qEas59Vaj +fGh/NIg0rYLTmYdbGHY37v+Scsmn578yj6qKfVQCHpsXn9IfV4jyyjcHMKy54DAB +mcJSEgCjPe2OGrqHZgDJIo4/BAywXNSOKLP7UA6ZCQKBgQDxltc43LoKhi88rFFo +6XrM//F19nMuCQoPX8aYFNJiD38E8Lhb3I2e2k7p/5oX2nQe0pfYrUlEoIcHWN3k +nKR0H5MYJHJO+pbEO2Qnpy7radROKhkMl8HcUCr95NOk71naIEY6RMVbdbp5BQii +4tcGPF1Ti1xKoJXszk9tldNjTwKBgQDjfeA6WMZpNzPWhvfX0tmGFyoriY0V3ieN +pmodhjlnWBgSFjN9r2DeA2HEzCHJ7OZSGAsniNOPf1tQiKJCn/AOiUsxkJYb3Svf +FVAXgpBnNSFf+nA9sDZoKduPkGGh2v+D5Kd3YvLJXLhTEg+PzkiDB/73g/435LAO +djex2K+rdQKBgQDf3Y/4AAlhITB6f488sUX62pysW8Alg3jQAEctu/WrqoJgWJPt +g7Wz4sXHbfQjcGmz+h99SC4hqsMVGtMbOc/mTy/l6jgGWY0Fr6dCW1hOCxYRjE8D +7UhV1+/Or5EzQBw51TheXEWpV7GA3RpAngT0oVE1zmiqOp5S/mZ5vKFSXwKBgHqE +8J578V/5OxHu3sx94mk1UKDiE/oTC3pehgggognmiBA1pMWsDp+DcsRqrEf0LpdI +G7nPFyHRlXxqGfmH6eHqT3UCSdX36AjdkaUXzG3JT5BBcPHIVYUEBhvrxqlFKRf/ +rbG+uMN6DEuxDnCEyMjJJahojiHMKIWhZZ2C9hmBAoGAAQKXcOPdU3HGUBOihCf1 ++Sg9sabFnjSb7TZlFZ6zuf+XxX4+gOB/01tSu048SBsc8kiVXHW0YzuhRM2dkbsd +2G0tmDhM1aqEvVCJJIimZ4FOZRKK/WDub0R//j9iM7IpNvuPIju1MsU+CIo2Ip9B +gdiTpWUJj/Qw7vvpREmQPI4= +-----END PRIVATE KEY----- diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/testing/tls.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/testing/tls.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/testing/tls.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/testing/tls.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,47 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Test helpers for TLS negotiation with AMP.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "get_tls_parameters_for_cluster", + "get_tls_parameters_for_region", + ] + +from functools import partial + +from twisted.internet import ssl +from twisted.python import filepath + + +def get_tls_parameters(private_cert_name, trust_cert_name): + """get_tls_parameters() + + Implementation of :py:class:`~twisted.protocols.amp.StartTLS`. + """ + testing = filepath.FilePath(__file__).parent() + with testing.child(private_cert_name).open() as fin: + tls_localCertificate = ssl.PrivateCertificate.loadPEM(fin.read()) + with testing.child(trust_cert_name).open() as fin: + tls_verifyAuthorities = [ + ssl.Certificate.loadPEM(fin.read()), + ] + return { + "tls_localCertificate": tls_localCertificate, + "tls_verifyAuthorities": tls_verifyAuthorities, + } + + +get_tls_parameters_for_cluster = partial( + get_tls_parameters, "cluster.crt", "trust.crt") +get_tls_parameters_for_region = partial( + get_tls_parameters, "region.crt", "trust.crt") diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_arguments.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_arguments.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_arguments.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_arguments.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,12 +14,61 @@ __metaclass__ = type __all__ = [] +import random +import zlib + +from maastesting.factory import factory from maastesting.testcase import MAASTestCase from provisioningserver.rpc import arguments +from testtools import ExpectedException from testtools.matchers import ( Equals, IsInstance, + LessThan, ) +from twisted.protocols import amp + + +class TestBytes(MAASTestCase): + + def test_round_trip(self): + argument = arguments.Bytes() + example = factory.make_bytes() + 
encoded = argument.toString(example) + self.assertThat(encoded, IsInstance(bytes)) + decoded = argument.fromString(encoded) + self.assertThat(decoded, Equals(example)) + + def test_error_when_input_is_not_a_byte_string(self): + with ExpectedException(TypeError, "^Not a byte string: <.*"): + arguments.Bytes().toString(object()) + + +class TestChoice(MAASTestCase): + + def test_round_trip(self): + choices = { + factory.make_name("name"): factory.make_bytes() + for _ in xrange(10) + } + argument = arguments.Choice(choices) + choice = random.choice(list(choices)) + encoded = argument.toString(choice) + self.assertThat(encoded, IsInstance(bytes)) + decoded = argument.fromString(encoded) + self.assertThat(decoded, Equals(choice)) + + def test_error_when_input_is_not_in_choices(self): + with ExpectedException(KeyError, "^ 1. "num_connections": 0, "expected": ClusterClientService.INTERVAL_LOW, }), ("fewer-connections-than-event-loops", { + "time_running": 1000, "num_eventloops": 2, # anything > num_connections. "num_connections": 1, # anything > 0. "expected": ClusterClientService.INTERVAL_MID, }), ("default", { + "time_running": 1000, "num_eventloops": 3, # same as num_connections. "num_connections": 3, # same as num_eventloops. 
"expected": ClusterClientService.INTERVAL_HIGH, @@ -520,6 +783,7 @@ def test__calculate_interval(self): service = self.make_inert_client_service() service.startService() + service.clock.advance(self.time_running) self.assertEqual( self.expected, service._calculate_interval( self.num_eventloops, self.num_connections)) @@ -527,7 +791,14 @@ class TestClusterClient(MAASTestCase): - run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=5) + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def setUp(self): + super(TestClusterClient, self).setUp() + self.useFixture(EnvironmentVariable( + "MAAS_URL", factory.make_simple_http_url())) + self.useFixture(EnvironmentVariable( + "CLUSTER_UUID", factory.make_UUID().encode("ascii"))) def make_running_client(self): client = clusterservice.ClusterClient( @@ -536,8 +807,35 @@ client.service.running = True return client + def patch_authenticate_for_success(self, client): + authenticate = self.patch_autospec(client, "authenticateRegion") + authenticate.side_effect = always_succeed_with(True) + + def patch_authenticate_for_failure(self, client): + authenticate = self.patch_autospec(client, "authenticateRegion") + authenticate.side_effect = always_succeed_with(False) + + def patch_authenticate_for_error(self, client, exception): + authenticate = self.patch_autospec(client, "authenticateRegion") + authenticate.side_effect = always_fail_with(exception) + + def patch_register_for_success(self, client): + register = self.patch_autospec(client, "registerWithRegion") + register.side_effect = always_succeed_with(True) + + def patch_register_for_failure(self, client): + register = self.patch_autospec(client, "registerWithRegion") + register.side_effect = always_succeed_with(False) + + def patch_register_for_error(self, client, exception): + register = self.patch_autospec(client, "registerWithRegion") + register.side_effect = always_fail_with(exception) + def test_interfaces(self): client = self.make_running_client() + 
# transport.getHandle() is used by AMP._getPeerCertificate, which we + # call indirectly via the peerCertificate attribute in IConnection. + self.patch(client, "transport") verifyObject(IConnection, client) def test_ident(self): @@ -547,8 +845,19 @@ def test_connecting(self): client = self.make_running_client() + self.patch_authenticate_for_success(client) + self.patch_register_for_success(client) self.assertEqual(client.service.connections, {}) + wait_for_authenticated = client.authenticated.get() + self.assertThat(wait_for_authenticated, IsUnfiredDeferred()) + wait_for_ready = client.ready.get() + self.assertThat(wait_for_ready, IsUnfiredDeferred()) client.connectionMade() + # authenticated has been set to True, denoting a successfully + # authenticated region. + self.assertTrue(extract_result(wait_for_authenticated)) + # ready has been set with the name of the event-loop. + self.assertEqual(client.eventloop, extract_result(wait_for_ready)) self.assertEqual( client.service.connections, {client.eventloop: client}) @@ -564,6 +873,13 @@ transport.protocol = client client.makeConnection(transport) + # authenticated was set to None to signify that authentication was not + # attempted. + self.assertIsNone(extract_result(client.authenticated.get())) + # ready was set with KeyError to signify that a connection to the + # same event-loop already existed. + self.assertRaises(KeyError, extract_result, client.ready.get()) + # The connections list is unchanged because the new connection # immediately disconnects. self.assertEqual( @@ -581,11 +897,139 @@ transport.protocol = client client.makeConnection(transport) + # authenticated was set to None to signify that authentication was not + # attempted. + self.assertIsNone(extract_result(client.authenticated.get())) + # ready was set with RuntimeError to signify that the client + # service was not running. 
+ self.assertRaises(RuntimeError, extract_result, client.ready.get()) + + # The connections list is unchanged because the new connection + # immediately disconnects. + self.assertEqual(client.service.connections, {}) + self.assertFalse(client.connected) + + def test_disconnects_when_authentication_fails(self): + client = self.make_running_client() + self.patch_authenticate_for_failure(client) + self.patch_register_for_success(client) + + # Connect via an in-memory transport. + transport = StringTransportWithDisconnection() + transport.protocol = client + client.makeConnection(transport) + + # authenticated was set to False. + self.assertIs(False, extract_result(client.authenticated.get())) + # ready was set with AuthenticationFailed. + self.assertRaises( + exceptions.AuthenticationFailed, extract_result, + client.ready.get()) + + # The connections list is unchanged because the new connection + # immediately disconnects. + self.assertEqual(client.service.connections, {}) + self.assertFalse(client.connected) + + def test_disconnects_when_authentication_errors(self): + client = self.make_running_client() + exception_type = factory.make_exception_type() + self.patch_authenticate_for_error(client, exception_type()) + self.patch_register_for_success(client) + + logger = self.useFixture(TwistedLoggerFixture()) + + # Connect via an in-memory transport. + transport = StringTransportWithDisconnection() + transport.protocol = client + client.makeConnection(transport) + + # authenticated errbacks with the error. + self.assertRaises( + exception_type, extract_result, client.authenticated.get()) + # ready also errbacks with the same error. + self.assertRaises( + exception_type, extract_result, client.ready.get()) + + # The log was written to. + self.assertDocTestMatches( + """... + Event-loop 'eventloop:pid=12345' handshake failed; + dropping connection. + Traceback (most recent call last):... 
+ """, + logger.dump()) + # The connections list is unchanged because the new connection # immediately disconnects. self.assertEqual(client.service.connections, {}) self.assertFalse(client.connected) + def test_disconnects_when_registration_fails(self): + client = self.make_running_client() + self.patch_authenticate_for_success(client) + self.patch_register_for_failure(client) + + # Connect via an in-memory transport. + transport = StringTransportWithDisconnection() + transport.protocol = client + client.makeConnection(transport) + + # authenticated was set to True because it succeeded. + self.assertIs(True, extract_result(client.authenticated.get())) + # ready was set with AuthenticationFailed. + self.assertRaises( + exceptions.RegistrationFailed, extract_result, + client.ready.get()) + + # The connections list is unchanged because the new connection + # immediately disconnects. + self.assertEqual(client.service.connections, {}) + self.assertFalse(client.connected) + + def test_disconnects_when_registration_errors(self): + client = self.make_running_client() + exception_type = factory.make_exception_type() + self.patch_authenticate_for_success(client) + self.patch_register_for_error(client, exception_type()) + + logger = self.useFixture(TwistedLoggerFixture()) + + # Connect via an in-memory transport. + transport = StringTransportWithDisconnection() + transport.protocol = client + client.makeConnection(transport) + + # authenticated was set to True because it succeeded. + self.assertIs(True, extract_result(client.authenticated.get())) + # ready was set with the exception we made. + self.assertRaises(exception_type, extract_result, client.ready.get()) + + # The log was written to. + self.assertDocTestMatches( + """... + Event-loop 'eventloop:pid=12345' handshake failed; + dropping connection. + Traceback (most recent call last):... + """, + logger.dump()) + + # The connections list is unchanged because the new connection + # immediately disconnects. 
+ self.assertEqual(client.service.connections, {}) + self.assertFalse(client.connected) + + def test_handshakeFailed_does_not_log_when_connection_is_closed(self): + client = self.make_running_client() + with TwistedLoggerFixture() as logger: + client.handshakeFailed(Failure(ConnectionClosed())) + # ready was set with ConnectionClosed. + self.assertRaises( + ConnectionClosed, extract_result, + client.ready.get()) + # Nothing was logged. + self.assertEqual("", logger.output) + @inlineCallbacks def test_secureConnection_calls_StartTLS_and_Identify(self): client = self.make_running_client() @@ -593,7 +1037,7 @@ callRemote = self.patch(client, "callRemote") callRemote_return_values = [ {}, # In response to a StartTLS call. - {"name": client.eventloop}, # Identify. + {"ident": client.eventloop}, # Identify. ] callRemote.side_effect = lambda cmd, **kwargs: ( callRemote_return_values.pop(0)) @@ -626,12 +1070,10 @@ client = self.make_running_client() callRemote = self.patch(client, "callRemote") - callRemote_return_values = [ + callRemote.side_effect = [ {}, # In response to a StartTLS call. - {"name": "bogus-name"}, # Identify. + {"ident": "bogus-name"}, # Identify. ] - callRemote.side_effect = lambda cmd, **kwargs: ( - callRemote_return_values.pop(0)) transport = self.patch(client, "transport") logger = self.useFixture(TwistedLoggerFixture()) @@ -650,16 +1092,132 @@ """, logger.dump()) + @skip( + "XXX: blake_r 2015-02-26 bug=1426089: Failing because of an " + "unknown reason.") + @inlineCallbacks + def test_secureConnection_end_to_end(self): + fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) + protocol, connecting = fixture.makeEventLoop() + self.addCleanup((yield connecting)) + client = yield getRegionClient() + # XXX: Expose secureConnection() in the client. 
+ yield client._conn.secureConnection() + self.assertTrue(client.isSecure()) + + def test_authenticateRegion_accepts_matching_digests(self): + client = self.make_running_client() + + def calculate_digest(_, message): + # Use the cluster's own authentication responder. + response = Cluster().authenticate(message) + return succeed(response) + + callRemote = self.patch_autospec(client, "callRemote") + callRemote.side_effect = calculate_digest + + d = client.authenticateRegion() + self.assertTrue(extract_result(d)) + + def test_authenticateRegion_rejects_non_matching_digests(self): + client = self.make_running_client() + + def calculate_digest(_, message): + # Return some nonsense. + response = { + "digest": factory.make_bytes(), + "salt": factory.make_bytes(), + } + return succeed(response) + + callRemote = self.patch_autospec(client, "callRemote") + callRemote.side_effect = calculate_digest + + d = client.authenticateRegion() + self.assertFalse(extract_result(d)) + + def test_authenticateRegion_propagates_errors(self): + client = self.make_running_client() + exception_type = factory.make_exception_type() + + callRemote = self.patch_autospec(client, "callRemote") + callRemote.return_value = fail(exception_type()) + + d = client.authenticateRegion() + self.assertRaises(exception_type, extract_result, d) + + @inlineCallbacks + def test_authenticateRegion_end_to_end(self): + fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) + protocol, connecting = fixture.makeEventLoop() + self.addCleanup((yield connecting)) + yield getRegionClient() + self.assertThat( + protocol.Authenticate, + MockCalledOnceWith(protocol, message=ANY)) + + def test_registerWithRegion_returns_True_when_accepted(self): + client = self.make_running_client() + + callRemote = self.patch_autospec(client, "callRemote") + callRemote.side_effect = always_succeed_with({}) + + logger = self.useFixture(TwistedLoggerFixture()) + + d = client.registerWithRegion() + self.assertTrue(extract_result(d)) + 
+ self.assertDocTestMatches( + "Cluster '...' registered (via ...).", + logger.output) + + def test_registerWithRegion_returns_False_when_rejected(self): + client = self.make_running_client() + + callRemote = self.patch_autospec(client, "callRemote") + callRemote.return_value = fail(exceptions.CannotRegisterCluster()) + + logger = self.useFixture(TwistedLoggerFixture()) + + d = client.registerWithRegion() + self.assertFalse(extract_result(d)) + + self.assertDocTestMatches( + "Cluster '...' REJECTED by the region (via ...).", + logger.output) + + def test_registerWithRegion_propagates_errors(self): + client = self.make_running_client() + exception_type = factory.make_exception_type() + + callRemote = self.patch_autospec(client, "callRemote") + callRemote.return_value = fail(exception_type()) + + d = client.registerWithRegion() + self.assertRaises(exception_type, extract_result, d) + + @inlineCallbacks + def test_registerWithRegion_end_to_end(self): + fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) + protocol, connecting = fixture.makeEventLoop() + self.addCleanup((yield connecting)) + yield getRegionClient() + self.assertThat( + protocol.Register, MockCalledOnceWith( + protocol, uuid=get_cluster_uuid(), + networks=discover_networks(), + url=urlparse(get_maas_url()))) + class TestClusterProtocol_ListSupportedArchitectures(MAASTestCase): - run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=5) + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test_is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.ListSupportedArchitectures.commandName) - self.assertIsNot(responder, None) + self.assertIsNotNone(responder) @inlineCallbacks def test_returns_architectures(self): @@ -673,3 +1231,927 @@ 'description': 'i386', }, architectures['architectures']) + + +class TestClusterProtocol_ListOperatingSystems(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def 
test_is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder( + cluster.ListOperatingSystems.commandName) + self.assertIsNotNone(responder) + + @inlineCallbacks + def test_returns_oses(self): + # Patch in some operating systems with some randomised data. See + # StubOS for details of the rules that are used to populate the + # non-random elements. + operating_systems = [ + StubOS(factory.make_name("os"), releases=[ + (factory.make_name("name"), factory.make_name("title")) + for _ in range(randint(2, 5)) + ]) + for _ in range(randint(2, 5)) + ] + self.patch( + osystems_rpc_module, "OperatingSystemRegistry", + [(os.name, os) for os in operating_systems]) + osystems = yield call_responder( + Cluster(), cluster.ListOperatingSystems, {}) + # The fully-populated output from gen_operating_systems() sent + # back over the wire. + expected_osystems = list(gen_operating_systems()) + for expected_osystem in expected_osystems: + expected_osystem["releases"] = list(expected_osystem["releases"]) + expected = {"osystems": expected_osystems} + self.assertEqual(expected, osystems) + + +class TestClusterProtocol_GetOSReleaseTitle(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test_is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder( + cluster.GetOSReleaseTitle.commandName) + self.assertIsNotNone(responder) + + @inlineCallbacks + def test_calls_get_os_release_title(self): + title = factory.make_name('title') + get_os_release_title = self.patch( + clusterservice, "get_os_release_title") + get_os_release_title.return_value = title + arguments = { + "osystem": factory.make_name("osystem"), + "release": factory.make_name("release"), + } + observed = yield call_responder( + Cluster(), cluster.GetOSReleaseTitle, arguments) + expected = {"title": title} + self.assertEqual(expected, observed) + # The arguments are passed to the responder positionally. 
+ self.assertThat(get_os_release_title, MockCalledOnceWith( + arguments["osystem"], arguments["release"])) + + @inlineCallbacks + def test_exception_when_os_does_not_exist(self): + # A remote NoSuchOperatingSystem exception is re-raised locally. + get_os_release_title = self.patch( + clusterservice, "get_os_release_title") + get_os_release_title.side_effect = exceptions.NoSuchOperatingSystem() + arguments = { + "osystem": factory.make_name("osystem"), + "release": factory.make_name("release"), + } + with ExpectedException(exceptions.NoSuchOperatingSystem): + yield call_responder( + Cluster(), cluster.GetOSReleaseTitle, arguments) + + +class TestClusterProtocol_ValidateLicenseKey(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test_is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder( + cluster.ValidateLicenseKey.commandName) + self.assertIsNotNone(responder) + + @inlineCallbacks + def test_calls_validate_license_key(self): + validate_license_key = self.patch( + clusterservice, "validate_license_key") + validate_license_key.return_value = factory.pick_bool() + arguments = { + "osystem": factory.make_name("osystem"), + "release": factory.make_name("release"), + "key": factory.make_name("key"), + } + observed = yield call_responder( + Cluster(), cluster.ValidateLicenseKey, arguments) + expected = {"is_valid": validate_license_key.return_value} + self.assertEqual(expected, observed) + # The arguments are passed to the responder positionally. + self.assertThat(validate_license_key, MockCalledOnceWith( + arguments["osystem"], arguments["release"], arguments["key"])) + + @inlineCallbacks + def test_exception_when_os_does_not_exist(self): + # A remote NoSuchOperatingSystem exception is re-raised locally. 
+ validate_license_key = self.patch( + clusterservice, "validate_license_key") + validate_license_key.side_effect = exceptions.NoSuchOperatingSystem() + arguments = { + "osystem": factory.make_name("osystem"), + "release": factory.make_name("release"), + "key": factory.make_name("key"), + } + with ExpectedException(exceptions.NoSuchOperatingSystem): + yield call_responder( + Cluster(), cluster.ValidateLicenseKey, arguments) + + +class TestClusterProtocol_GetPreseedData(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def make_arguments(self): + return { + "osystem": factory.make_name("osystem"), + "preseed_type": factory.make_name("preseed_type"), + "node_system_id": factory.make_name("system_id"), + "node_hostname": factory.make_name("hostname"), + "consumer_key": factory.make_name("consumer_key"), + "token_key": factory.make_name("token_key"), + "token_secret": factory.make_name("token_secret"), + "metadata_url": urlparse( + "https://%s/path/to/metadata" % factory.make_hostname()), + } + + def test_is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder( + cluster.GetPreseedData.commandName) + self.assertIsNotNone(responder) + + @inlineCallbacks + def test_calls_get_preseed_data(self): + get_preseed_data = self.patch(clusterservice, "get_preseed_data") + get_preseed_data.return_value = factory.make_name("data") + arguments = self.make_arguments() + observed = yield call_responder( + Cluster(), cluster.GetPreseedData, arguments) + expected = {"data": get_preseed_data.return_value} + self.assertEqual(expected, observed) + # The arguments are passed to the responder positionally. 
+ self.assertThat(get_preseed_data, MockCalledOnceWith( + arguments["osystem"], arguments["preseed_type"], + arguments["node_system_id"], arguments["node_hostname"], + arguments["consumer_key"], arguments["token_key"], + arguments["token_secret"], arguments["metadata_url"])) + + @inlineCallbacks + def test_exception_when_os_does_not_exist(self): + # A remote NoSuchOperatingSystem exception is re-raised locally. + get_preseed_data = self.patch( + clusterservice, "get_preseed_data") + get_preseed_data.side_effect = exceptions.NoSuchOperatingSystem() + arguments = self.make_arguments() + with ExpectedException(exceptions.NoSuchOperatingSystem): + yield call_responder( + Cluster(), cluster.GetPreseedData, arguments) + + @inlineCallbacks + def test_exception_when_preseed_not_implemented(self): + # A remote NotImplementedError exception is re-raised locally. + # Choose an operating system which has not overridden the + # default compose_preseed. + osystem_name = next( + osystem_name for osystem_name, osystem in OperatingSystemRegistry + if osystem.compose_preseed == OperatingSystem.compose_preseed) + arguments = self.make_arguments() + arguments["osystem"] = osystem_name + with ExpectedException(exceptions.NoSuchOperatingSystem): + yield call_responder( + Cluster(), cluster.GetPreseedData, arguments) + + +class TestClusterProtocol_ComposeCurtinNetworkPreseed(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def make_args(self, osystem=None): + if osystem is None: + osystem = factory.make_name('os') + mac = factory.make_mac_address() + return { + 'osystem': osystem, + 'config': { + 'interfaces': [(factory.make_name('eth'), mac)], + 'auto_interfaces': [mac], + 'ips_mapping': {mac: [factory.make_ipv4_address()]}, + 'gateways_mapping': {mac: [factory.make_ipv4_address()]}, + 'nameservers': [], + }, + 'disable_ipv4': factory.pick_bool(), + } + + def test__is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder( + 
cluster.ComposeCurtinNetworkPreseed.commandName) + self.assertIsNotNone(responder) + + @inlineCallbacks + def test__calls_compose_curtin_network_preseed(self): + preseed = [factory.make_name('preseed')] + fake = self.patch_autospec( + clusterservice, 'compose_curtin_network_preseed') + fake.return_value = preseed + args = self.make_args() + + response = yield call_responder( + Cluster(), cluster.ComposeCurtinNetworkPreseed, args) + + self.expectThat(response, Equals({'data': preseed})) + self.expectThat( + fake, + MockCalledOnceWith( + args['osystem'], args['config'], args['disable_ipv4'])) + + @inlineCallbacks + def test__fails_for_unknown_OS(self): + args = self.make_args(osystem=factory.make_name('nonexistent-os')) + with ExpectedException(exceptions.NoSuchOperatingSystem): + yield call_responder( + Cluster(), cluster.ComposeCurtinNetworkPreseed, args) + + +class TestClusterProtocol_PowerOn_PowerOff(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + scenarios = ( + ("power-on", { + "command": cluster.PowerOn, + "expected_power_change": "on", + }), + ("power-off", { + "command": cluster.PowerOff, + "expected_power_change": "off", + }), + ) + + def test_is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder(self.command.commandName) + self.assertIsNotNone(responder) + + def test_executes_maybe_change_power_state(self): + maybe_change_power_state = self.patch( + clusterservice, "maybe_change_power_state") + + system_id = factory.make_name("system_id") + hostname = factory.make_name("hostname") + power_type = factory.make_name("power_type") + context = { + factory.make_name("name"): factory.make_name("value"), + } + + d = call_responder(Cluster(), self.command, { + "system_id": system_id, + "hostname": hostname, + "power_type": power_type, + "context": context, + }) + + def check(response): + self.assertThat( + maybe_change_power_state, + MockCalledOnceWith( + system_id, hostname, power_type, + 
power_change=self.expected_power_change, context=context)) + return d.addCallback(check) + + def test_power_on_can_propagate_UnknownPowerType(self): + self.patch(clusterservice, "maybe_change_power_state").side_effect = ( + UnknownPowerType) + + d = call_responder(Cluster(), self.command, { + "system_id": "id", "hostname": "hostname", "power_type": "type", + "context": {}, + }) + # If the call doesn't fail then we have a test failure; we're + # *expecting* UnknownPowerType to be raised. + d.addCallback(self.fail) + + def check(failure): + failure.trap(UnknownPowerType) + return d.addErrback(check) + + def test_power_on_can_propagate_NotImplementedError(self): + self.patch(clusterservice, "maybe_change_power_state").side_effect = ( + NotImplementedError) + + d = call_responder(Cluster(), self.command, { + "system_id": "id", "hostname": "hostname", "power_type": "type", + "context": {}, + }) + # If the call doesn't fail then we have a test failure; we're + # *expecting* NotImplementedError to be raised. + d.addCallback(self.fail) + + def check(failure): + failure.trap(NotImplementedError) + return d.addErrback(check) + + def test_power_on_can_propagate_PowerActionFail(self): + self.patch(clusterservice, "maybe_change_power_state").side_effect = ( + PowerActionFail) + + d = call_responder(Cluster(), self.command, { + "system_id": "id", "hostname": "hostname", "power_type": "type", + "context": {}, + }) + # If the call doesn't fail then we have a test failure; we're + # *expecting* PowerActionFail to be raised. 
+ d.addCallback(self.fail) + + def check(failure): + failure.trap(PowerActionFail) + return d.addErrback(check) + + def test_power_on_can_propagate_PowerActionAlreadyInProgress(self): + self.patch(clusterservice, "maybe_change_power_state").side_effect = ( + exceptions.PowerActionAlreadyInProgress) + + d = call_responder(Cluster(), self.command, { + "system_id": "id", "hostname": "hostname", "power_type": "type", + "context": {}, + }) + # If the call doesn't fail then we have a test failure; we're + # *expecting* PowerActionFail to be raised. + d.addCallback(self.fail) + + def check(failure): + failure.trap(exceptions.PowerActionAlreadyInProgress) + return d.addErrback(check) + + +class TestClusterProtocol_PowerQuery(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test_is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder( + cluster.PowerQuery.commandName) + self.assertIsNotNone(responder) + + @inlineCallbacks + def test_returns_power_state(self): + state = random.choice(['on', 'off']) + power_state_update = self.patch( + power_module, "power_state_update") + perform_power_query = self.patch( + power_module, "perform_power_query") + perform_power_query.return_value = state + + arguments = { + 'system_id': factory.make_name(''), + 'hostname': factory.make_name(''), + 'power_type': random.choice(QUERY_POWER_TYPES), + 'context': factory.make_name(''), + } + observed = yield call_responder( + Cluster(), cluster.PowerQuery, arguments) + self.assertEqual({'state': state}, observed) + self.assertThat( + perform_power_query, + MockCalledOnceWith( + arguments['system_id'], arguments['hostname'], + arguments['power_type'], arguments['context'])) + self.assertThat( + power_state_update, + MockCalledOnceWith(arguments['system_id'], state)) + + +class TestClusterProtocol_ConfigureDHCP(MAASTestCase): + + scenarios = ( + ("DHCPv4", { + "dhcp_server": (dhcp, "DHCPv4Server"), + "command": cluster.ConfigureDHCPv4, + 
"make_network": factory.make_ipv4_network, + }), + ("DHCPv6", { + "dhcp_server": (dhcp, "DHCPv6Server"), + "command": cluster.ConfigureDHCPv6, + "make_network": factory.make_ipv6_network, + }), + ) + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test__is_registered(self): + self.assertIsNotNone( + Cluster().locateResponder(self.command.commandName)) + + @inlineCallbacks + def test__executes_configure_dhcp(self): + DHCPServer = self.patch_autospec(*self.dhcp_server) + configure = self.patch_autospec(dhcp, "configure") + + omapi_key = factory.make_name('key') + subnet_configs = [make_subnet_config()] + + yield call_responder(Cluster(), self.command, { + 'omapi_key': omapi_key, + 'subnet_configs': subnet_configs, + }) + + self.assertThat(DHCPServer, MockCalledOnceWith(omapi_key)) + self.assertThat(configure, MockCalledOnceWith( + DHCPServer.return_value, subnet_configs)) + + @inlineCallbacks + def test__limits_concurrency(self): + self.patch_autospec(*self.dhcp_server) + + def check_dhcp_locked(server, subnet_configs): + self.assertTrue(concurrency.dhcp.locked) + # While we're here, check this is *not* the IO thread. 
+ self.expectThat(isInIOThread(), Is(False)) + + self.patch(dhcp, "configure", check_dhcp_locked) + + self.assertFalse(concurrency.dhcp.locked) + yield call_responder(Cluster(), self.command, { + 'omapi_key': factory.make_name('key'), + 'subnet_configs': [], + }) + self.assertFalse(concurrency.dhcp.locked) + + @inlineCallbacks + def test__propagates_CannotConfigureDHCP(self): + configure = self.patch_autospec(dhcp, "configure") + configure.side_effect = ( + exceptions.CannotConfigureDHCP("Deliberate failure")) + omapi_key = factory.make_name('key') + network = self.make_network() + ip_low, ip_high = factory.make_ip_range(network) + subnet_configs = [make_subnet_config()] + + with ExpectedException(exceptions.CannotConfigureDHCP): + yield call_responder(Cluster(), self.command, { + 'omapi_key': omapi_key, + 'subnet_configs': subnet_configs, + }) + + +class TestClusterProtocol_CreateHostMaps(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test_is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder( + cluster.CreateHostMaps.commandName) + self.assertIsNotNone(responder) + + @inlineCallbacks + def test_executes_create_host_maps(self): + create_host_maps = self.patch(clusterservice, "create_host_maps") + mappings = [ + {"ip_address": factory.make_ipv4_address(), + "mac_address": factory.make_mac_address()} + for _ in range(2) + ] + shared_key = factory.make_name("shared_key") + + yield call_responder(Cluster(), cluster.CreateHostMaps, { + "mappings": mappings, "shared_key": shared_key, + }) + self.assertThat( + create_host_maps, MockCalledOnceWith( + mappings, shared_key)) + + @inlineCallbacks + def test__limits_concurrency(self): + + def check_dhcp_locked(mappings, shared_key): + self.assertTrue(concurrency.dhcp.locked) + # While we're here, check this is *not* the IO thread. 
+ self.expectThat(isInIOThread(), Is(False)) + + self.patch(clusterservice, "create_host_maps", check_dhcp_locked) + + self.assertFalse(concurrency.dhcp.locked) + yield call_responder(Cluster(), cluster.CreateHostMaps, { + "mappings": {}, "shared_key": factory.make_name("key"), + }) + self.assertFalse(concurrency.dhcp.locked) + + +class TestClusterProtocol_RemoveHostMaps(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test_is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder( + cluster.RemoveHostMaps.commandName) + self.assertIsNotNone(responder) + + @inlineCallbacks + def test_executes_remove_host_maps(self): + remove_host_maps = self.patch(clusterservice, "remove_host_maps") + ip_addresses = [factory.make_ipv4_address() for _ in range(2)] + shared_key = factory.make_name("shared_key") + + yield call_responder(Cluster(), cluster.RemoveHostMaps, { + "ip_addresses": ip_addresses, "shared_key": shared_key, + }) + self.assertThat( + remove_host_maps, MockCalledOnceWith( + ip_addresses, shared_key)) + + @inlineCallbacks + def test__limits_concurrency(self): + + def check_dhcp_locked(ip_addresses, shared_key): + self.assertTrue(concurrency.dhcp.locked) + # While we're here, check this is *not* the IO thread. 
+ self.expectThat(isInIOThread(), Is(False)) + + self.patch(clusterservice, "remove_host_maps", check_dhcp_locked) + + self.assertFalse(concurrency.dhcp.locked) + yield call_responder(Cluster(), cluster.RemoveHostMaps, { + "ip_addresses": [], "shared_key": factory.make_name("key"), + }) + self.assertFalse(concurrency.dhcp.locked) + + +class TestClusterProtocol_StartMonitors(MAASTestCase): + + def test__is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder( + cluster.StartMonitors.commandName) + self.assertIsNotNone(responder) + + def test__executes_start_monitors(self): + deadline = datetime.now(amp.utc) + timedelta(seconds=10) + monitors = [{ + "deadline": deadline, "context": factory.make_name("ctx"), + "id": factory.make_name("id")}] + d = call_responder( + Cluster(), cluster.StartMonitors, {"monitors": monitors}) + self.addCleanup(cancel_monitor, monitors[0]["id"]) + self.assertTrue(d.called) + self.assertThat(running_monitors, Contains(monitors[0]["id"])) + + +class TestClusterProtocol_CancelMonitor(MAASTestCase): + + def test__is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder( + cluster.CancelMonitor.commandName) + self.assertIsNotNone(responder) + + def test__executes_cancel_monitor(self): + deadline = datetime.now(amp.utc) + timedelta(seconds=10) + monitors = [{ + "deadline": deadline, "context": factory.make_name("ctx"), + "id": factory.make_name("id")}] + call_responder( + Cluster(), cluster.StartMonitors, {"monitors": monitors}) + + call_responder( + Cluster(), cluster.CancelMonitor, {"id": monitors[0]["id"]}) + self.assertThat(running_monitors, Not(Contains(monitors[0]["id"]))) + + +class TestClusterProtocol_EvaluateTag(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test__is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder( + cluster.EvaluateTag.commandName) + self.assertIsNotNone(responder) + + @inlineCallbacks + def 
test_happy_path(self): + get_maas_url = self.patch_autospec(tags, "get_maas_url") + get_maas_url.return_value = sentinel.maas_url + get_cluster_uuid = self.patch_autospec(tags, "get_cluster_uuid") + get_cluster_uuid.return_value = sentinel.cluster_uuid + + # Prevent real work being done, which would involve HTTP calls. + self.patch_autospec(tags, "process_node_tags") + + response = yield call_responder( + Cluster(), cluster.EvaluateTag, { + "tag_name": "all-nodes", + "tag_definition": "//*", + "tag_nsmap": [ + {"prefix": "foo", + "uri": "http://foo.example.com/"}, + ], + "credentials": "abc:def:ghi", + }) + + self.assertEqual({}, response) + + @inlineCallbacks + def test__calls_through_to_evaluate_tag_helper(self): + evaluate_tag = self.patch_autospec(clusterservice, "evaluate_tag") + + tag_name = factory.make_name("tag-name") + tag_definition = factory.make_name("tag-definition") + tag_ns_prefix = factory.make_name("tag-ns-prefix") + tag_ns_uri = factory.make_name("tag-ns-uri") + + consumer_key = factory.make_name("ckey") + resource_token = factory.make_name("rtok") + resource_secret = factory.make_name("rsec") + credentials = convert_tuple_to_string( + (consumer_key, resource_token, resource_secret)) + + yield call_responder( + Cluster(), cluster.EvaluateTag, { + "tag_name": tag_name, + "tag_definition": tag_definition, + "tag_nsmap": [ + {"prefix": tag_ns_prefix, "uri": tag_ns_uri}, + ], + "credentials": credentials, + }) + + self.assertThat(evaluate_tag, MockCalledOnceWith( + tag_name, tag_definition, {tag_ns_prefix: tag_ns_uri}, + (consumer_key, resource_token, resource_secret), + )) + + +class TestClusterProtocol_AddVirsh(MAASTestCase): + + def test__is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder( + cluster.AddVirsh.commandName) + self.assertIsNotNone(responder) + + def test__calls_deferToThread_with_probe_virsh_and_enlist(self): + mock_deferToThread = self.patch_autospec( + clusterservice, 'deferToThread') + poweraddr = 
factory.make_name('poweraddr') + password = factory.make_name('password') + prefix_filter = factory.make_name('prefix_filter') + call_responder(Cluster(), cluster.AddVirsh, { + "poweraddr": poweraddr, + "password": password, + "prefix_filter": prefix_filter, + }) + self.assertThat( + mock_deferToThread, MockCalledOnceWith( + clusterservice.probe_virsh_and_enlist, + poweraddr, password, prefix_filter)) + + def test__password_is_optional(self): + mock_deferToThread = self.patch_autospec( + clusterservice, 'deferToThread') + poweraddr = factory.make_name('poweraddr') + call_responder(Cluster(), cluster.AddVirsh, { + "poweraddr": poweraddr, + "password": None, + }) + self.assertThat( + mock_deferToThread, MockCalledOnceWith( + clusterservice.probe_virsh_and_enlist, + poweraddr, None, None)) + + def test__can_be_called_without_password_key(self): + mock_deferToThread = self.patch_autospec( + clusterservice, 'deferToThread') + poweraddr = factory.make_name('poweraddr') + call_responder(Cluster(), cluster.AddVirsh, { + "poweraddr": poweraddr, + }) + self.assertThat( + mock_deferToThread, MockCalledOnceWith( + clusterservice.probe_virsh_and_enlist, + poweraddr, None, None)) + + def test__logs_error_to_maaslog(self): + fake_error = factory.make_name('error') + self.patch(clusterservice, 'maaslog') + mock_deferToThread = self.patch_autospec( + clusterservice, 'deferToThread') + mock_deferToThread.return_value = fail(Exception(fake_error)) + poweraddr = factory.make_name('poweraddr') + password = factory.make_name('password') + prefix_filter = factory.make_name('prefix_filter') + call_responder(Cluster(), cluster.AddVirsh, { + "poweraddr": poweraddr, + "password": password, + "prefix_filter": prefix_filter, + }) + self.assertThat( + clusterservice.maaslog.error, + MockAnyCall( + "Failed to probe and enlist %s nodes: %s", + "virsh", fake_error)) + + +class TestClusterProtocol_AddSeaMicro15k(MAASTestCase): + + def test__is_registered(self): + protocol = Cluster() + responder = 
protocol.locateResponder( + cluster.AddSeaMicro15k.commandName) + self.assertIsNotNone(responder) + + def test__calls_find_ip_via_arp(self): + # Prevent any actual probing from happing. + self.patch_autospec( + clusterservice, 'deferToThread') + find_ip_via_arp = self.patch_autospec( + clusterservice, 'find_ip_via_arp') + find_ip_via_arp.return_value = factory.make_ipv4_address() + + mac = factory.make_mac_address() + username = factory.make_name('user') + password = factory.make_name('password') + power_control = factory.make_name('power_control') + call_responder(Cluster(), cluster.AddSeaMicro15k, { + "mac": mac, + "username": username, + "password": password, + "power_control": power_control + }) + + self.assertThat( + find_ip_via_arp, MockCalledOnceWith(mac)) + + @inlineCallbacks + def test__raises_and_logs_warning_if_no_ip_found_for_mac(self): + maaslog = self.patch(clusterservice, 'maaslog') + find_ip_via_arp = self.patch_autospec( + clusterservice, 'find_ip_via_arp') + find_ip_via_arp.return_value = None + + mac = factory.make_mac_address() + username = factory.make_name('user') + password = factory.make_name('password') + power_control = factory.make_name('power_control') + + with ExpectedException(exceptions.NoIPFoundForMACAddress): + yield call_responder(Cluster(), cluster.AddSeaMicro15k, { + "mac": mac, + "username": username, + "password": password, + "power_control": power_control + }) + + self.assertThat( + maaslog.warning, + MockCalledOnceWith( + "Couldn't find IP address for MAC %s" % mac)) + + def test__calls_deferToThread_with_probe_seamicro15k_and_enlist(self): + mock_deferToThread = self.patch_autospec( + clusterservice, 'deferToThread') + find_ip_via_arp = self.patch_autospec( + clusterservice, 'find_ip_via_arp') + find_ip_via_arp.return_value = factory.make_ipv4_address() + + mac = factory.make_mac_address() + username = factory.make_name('user') + password = factory.make_name('password') + power_control = factory.make_name('power_control') + 
call_responder(Cluster(), cluster.AddSeaMicro15k, { + "mac": mac, + "username": username, + "password": password, + "power_control": power_control + }) + + self.assertThat( + mock_deferToThread, MockCalledOnceWith( + clusterservice.probe_seamicro15k_and_enlist, + find_ip_via_arp.return_value, username, password, + power_control=power_control)) + + def test__logs_error_to_maaslog(self): + fake_error = factory.make_name('error') + self.patch(clusterservice, 'maaslog') + mock_deferToThread = self.patch_autospec( + clusterservice, 'deferToThread') + mock_deferToThread.return_value = fail(Exception(fake_error)) + find_ip_via_arp = self.patch_autospec( + clusterservice, 'find_ip_via_arp') + find_ip_via_arp.return_value = factory.make_ipv4_address() + + mac = factory.make_mac_address() + username = factory.make_name('user') + password = factory.make_name('password') + power_control = factory.make_name('power_control') + call_responder(Cluster(), cluster.AddSeaMicro15k, { + "mac": mac, + "username": username, + "password": password, + "power_control": power_control, + }) + self.assertThat( + clusterservice.maaslog.error, + MockAnyCall( + "Failed to probe and enlist %s nodes: %s", + "SeaMicro 15000", fake_error)) + + +class TestClusterProtocol_EnlistNodesFromMSCM(MAASTestCase): + + def test__is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder( + cluster.EnlistNodesFromMSCM.commandName) + self.assertIsNotNone(responder) + + def test__deferToThread_with_probe_and_enlist_mscm(self): + mock_deferToThread = self.patch_autospec( + clusterservice, 'deferToThread') + + host = factory.make_name('host') + username = factory.make_name('user') + password = factory.make_name('password') + + call_responder(Cluster(), cluster.EnlistNodesFromMSCM, { + 'host': host, + 'username': username, + 'password': password, + }) + + self.assertThat( + mock_deferToThread, MockCalledOnceWith( + clusterservice.probe_and_enlist_mscm, + host, username, password)) + + def 
test__logs_error_to_maaslog(self): + fake_error = factory.make_name('error') + self.patch(clusterservice, 'maaslog') + mock_deferToThread = self.patch_autospec( + clusterservice, 'deferToThread') + mock_deferToThread.return_value = fail(Exception(fake_error)) + host = factory.make_name('host') + username = factory.make_name('user') + password = factory.make_name('password') + + call_responder(Cluster(), cluster.EnlistNodesFromMSCM, { + "host": host, + "username": username, + "password": password, + }) + self.assertThat( + clusterservice.maaslog.error, + MockAnyCall( + "Failed to probe and enlist %s nodes: %s", + "Moonshot", fake_error)) + + +class TestClusterProtocol_EnlistNodesFromUCSM(MAASTestCase): + + def test__is_registered(self): + protocol = Cluster() + responder = protocol.locateResponder( + cluster.EnlistNodesFromUCSM.commandName) + self.assertIsNotNone(responder) + + def test__calls_deferToThread_with_probe_and_enlist_ucsm(self): + mock_deferToThread = self.patch_autospec( + clusterservice, 'deferToThread') + + url = factory.make_url() + username = factory.make_name('user') + password = factory.make_name('password') + + call_responder(Cluster(), cluster.EnlistNodesFromUCSM, { + 'url': url, + 'username': username, + 'password': password, + }) + + self.assertThat( + mock_deferToThread, MockCalledOnceWith( + clusterservice.probe_and_enlist_ucsm, + url, username, password)) + + def test__logs_error_to_maaslog(self): + fake_error = factory.make_name('error') + self.patch(clusterservice, 'maaslog') + mock_deferToThread = self.patch_autospec( + clusterservice, 'deferToThread') + mock_deferToThread.return_value = fail(Exception(fake_error)) + url = factory.make_url() + username = factory.make_name('user') + password = factory.make_name('password') + + call_responder(Cluster(), cluster.EnlistNodesFromUCSM, { + "url": url, + "username": username, + "password": password, + }) + self.assertThat( + clusterservice.maaslog.error, + MockAnyCall( + "Failed to probe and 
enlist %s nodes: %s", + "UCS", fake_error)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_common.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_common.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_common.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_common.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,7 +14,13 @@ __metaclass__ = type __all__ = [] -from maastesting.matchers import MockCalledOnceWith +import re + +from maastesting.matchers import ( + IsFiredDeferred, + IsUnfiredDeferred, + MockCalledOnceWith, + ) from maastesting.testcase import MAASTestCase from mock import ( create_autospec, @@ -22,12 +28,16 @@ ) from provisioningserver.rpc import common from provisioningserver.rpc.testing.doubles import DummyConnection +from testtools import ExpectedException from testtools.matchers import ( Equals, Is, + IsInstance, Not, ) +from twisted.internet.protocol import connectionDone from twisted.protocols import amp +from twisted.test.proto_helpers import StringTransport class TestClient(MAASTestCase): @@ -38,7 +48,7 @@ self.assertThat(client._conn, Is(conn)) def make_connection_and_client(self): - conn = create_autospec(amp.AMP()) + conn = create_autospec(common.RPCProtocol()) client = common.Client(conn) return conn, client @@ -55,6 +65,16 @@ self.assertThat(conn.callRemote, MockCalledOnceWith( sentinel.command, foo=sentinel.foo, bar=sentinel.bar)) + def test_call_with_keyword_arguments_raises_useful_error(self): + conn = DummyConnection() + client = common.Client(conn) + expected_message = re.escape( + "provisioningserver.rpc.common.Client called with 3 positional " + "arguments, (1, 2, 3), but positional arguments are not " + "supported. 
Usage: client(command, arg1=value1, ...)") + with ExpectedException(TypeError, expected_message): + client(sentinel.command, 1, 2, 3) + def test_getHostCertificate(self): conn, client = self.make_connection_and_client() conn.hostCertificate = sentinel.hostCertificate @@ -91,3 +111,23 @@ conn, client = self.make_connection_and_client() # The hash of a common.Client object is that of its connection. self.assertThat(hash(conn), Equals(hash(client))) + + +class TestRPCProtocol(MAASTestCase): + + def test_init(self): + protocol = common.RPCProtocol() + self.assertThat(protocol.onConnectionMade, IsUnfiredDeferred()) + self.assertThat(protocol.onConnectionLost, IsUnfiredDeferred()) + self.assertThat(protocol, IsInstance(amp.AMP)) + + def test_onConnectionMade_fires_when_connection_is_made(self): + protocol = common.RPCProtocol() + protocol.connectionMade() + self.assertThat(protocol.onConnectionMade, IsFiredDeferred()) + + def test_onConnectionLost_fires_when_connection_is_lost(self): + protocol = common.RPCProtocol() + protocol.makeConnection(StringTransport()) + protocol.connectionLost(connectionDone) + self.assertThat(protocol.onConnectionLost, IsFiredDeferred()) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_dhcp.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_dhcp.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_dhcp.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_dhcp.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,314 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for :py:module:`~provisioningserver.rpc.dhcp`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from random import randint + +from fixtures import FakeLogger +from maastesting.factory import factory +from maastesting.matchers import ( + MockAnyCall, + MockCalledOnceWith, + MockCalledWith, + MockCallsMatch, + MockNotCalled, + ) +from maastesting.testcase import MAASTestCase +from mock import ( + ANY, + call, + sentinel, + ) +from provisioningserver.dhcp import control +from provisioningserver.dhcp.omshell import Omshell +from provisioningserver.dhcp.testing.config import make_subnet_config +from provisioningserver.rpc import ( + dhcp, + exceptions, + ) +from provisioningserver.rpc.exceptions import CannotConfigureDHCP +from provisioningserver.utils.shell import ExternalProcessError + + +class TestConfigureDHCP(MAASTestCase): + + scenarios = ( + ("DHCPv4", {"server": dhcp.DHCPv4Server}), + ("DHCPv6", {"server": dhcp.DHCPv6Server}), + ) + + def configure(self, omapi_key, subnets): + server = self.server(omapi_key) + dhcp.configure(server, subnets) + + def patch_sudo_write_file(self): + return self.patch_autospec(dhcp, 'sudo_write_file') + + def patch_server_restart(self): + return self.patch_autospec(self.server, 'restart') + + def patch_server_stop(self): + return self.patch_autospec(self.server, 'stop') + + def patch_get_config(self): + return self.patch_autospec(dhcp, 'get_config') + + def test__extracts_interfaces(self): + write_file = self.patch_sudo_write_file() + self.patch_server_restart() + subnets = [make_subnet_config() for _ in range(3)] + self.configure(factory.make_name('key'), subnets) + self.assertThat( + write_file, + MockCalledWith( + ANY, + ' '.join(sorted(subnet['interface'] for subnet in subnets)))) + + def test__eliminates_duplicate_interfaces(self): + write_file = self.patch_sudo_write_file() + self.patch_server_restart() + interface = 
factory.make_name('interface') + subnets = [make_subnet_config() for _ in range(2)] + for subnet in subnets: + subnet['interface'] = interface + self.configure(factory.make_name('key'), subnets) + self.assertThat(write_file, MockCalledWith(ANY, interface)) + + def test__composes_dhcp_config(self): + self.patch_sudo_write_file() + self.patch_server_restart() + get_config = self.patch_get_config() + omapi_key = factory.make_name('key') + subnet = make_subnet_config() + self.configure(omapi_key, [subnet]) + self.assertThat( + get_config, + MockCalledOnceWith( + self.server.template_basename, omapi_key=omapi_key, + dhcp_subnets=[subnet])) + + def test__writes_dhcp_config(self): + write_file = self.patch_sudo_write_file() + self.patch_server_restart() + + subnet = make_subnet_config() + expected_config = factory.make_name('config') + self.patch_get_config().return_value = expected_config + + self.configure(factory.make_name('key'), [subnet]) + + self.assertThat( + write_file, + MockAnyCall(self.server.config_filename, expected_config)) + + def test__writes_interfaces_file(self): + write_file = self.patch_sudo_write_file() + self.patch_server_restart() + self.configure(factory.make_name('key'), [make_subnet_config()]) + self.assertThat( + write_file, + MockCalledWith(self.server.interfaces_filename, ANY)) + + def test__restarts_dhcp_server_if_subnets_defined(self): + self.patch_sudo_write_file() + restart_dhcp = self.patch_server_restart() + self.configure(factory.make_name('key'), [make_subnet_config()]) + self.assertThat(restart_dhcp, MockCalledWith(ANY)) + + def test__stops_dhcp_server_if_no_subnets_defined(self): + self.patch_sudo_write_file() + restart_dhcp = self.patch_server_restart() + stop_dhcp = self.patch_server_stop() + self.configure(factory.make_name('key'), []) + self.assertThat(stop_dhcp, MockCalledWith(ANY)) + self.assertThat(restart_dhcp, MockNotCalled()) + + def test__converts_failure_writing_file_to_CannotConfigureDHCP(self): + 
self.patch_sudo_write_file().side_effect = ( + ExternalProcessError(1, "sudo something")) + self.patch_server_restart() + self.assertRaises( + exceptions.CannotConfigureDHCP, self.configure, + factory.make_name('key'), [make_subnet_config()]) + + def test__converts_dhcp_restart_failure_to_CannotConfigureDHCP(self): + self.patch_sudo_write_file() + self.patch_server_restart().side_effect = ( + ExternalProcessError(1, "sudo something")) + self.assertRaises( + exceptions.CannotConfigureDHCP, self.configure, + factory.make_name('key'), [make_subnet_config()]) + + +class TestCreateHostMaps(MAASTestCase): + + def test_creates_omshell(self): + omshell = self.patch(dhcp, "Omshell") + dhcp.create_host_maps([], sentinel.shared_key) + self.assertThat(omshell, MockCallsMatch( + call(server_address=ANY, shared_key=sentinel.shared_key), + )) + + def test_calls_omshell_create(self): + omshell_create = self.patch(Omshell, "create") + mappings = [ + {"ip_address": factory.make_ipv4_address(), + "mac_address": factory.make_mac_address()} + for _ in range(5) + ] + dhcp.create_host_maps(mappings, sentinel.shared_key) + self.assertThat(omshell_create, MockCallsMatch(*( + call(mapping["ip_address"], mapping["mac_address"]) + for mapping in mappings + ))) + + def test_raises_error_when_omshell_crashes(self): + error_message = factory.make_name("error").encode("ascii") + omshell_create = self.patch(Omshell, "create") + omshell_create.side_effect = ExternalProcessError( + returncode=2, cmd=("omshell",), output=error_message) + ip_address = factory.make_ipv4_address() + mac_address = factory.make_mac_address() + mappings = [{"ip_address": ip_address, "mac_address": mac_address}] + with FakeLogger("maas.dhcp") as logger: + error = self.assertRaises( + exceptions.CannotCreateHostMap, dhcp.create_host_maps, + mappings, sentinel.shared_key) + # The CannotCreateHostMap exception includes a message describing the + # problematic mapping. + self.assertDocTestMatches( + "%s -> %s: ..." 
% (mac_address, ip_address), + unicode(error)) + # A message is also written to the maas.dhcp logger that describes the + # problematic mapping. + self.assertDocTestMatches( + "Could not create host map for ... with address ...: ...", + logger.output) + + +class TestRemoveHostMaps(MAASTestCase): + + def setUp(self): + super(TestRemoveHostMaps, self).setUp() + self.patch(Omshell, "remove") + self.patch(Omshell, "nullify_lease") + + def test_removes_omshell(self): + omshell = self.patch(dhcp, "Omshell") + dhcp.remove_host_maps([], sentinel.shared_key) + self.assertThat(omshell, MockCallsMatch( + call(server_address=ANY, shared_key=sentinel.shared_key), + )) + + def test_calls_omshell_remove(self): + ip_addresses = [factory.make_ipv4_address() for _ in range(5)] + dhcp.remove_host_maps(ip_addresses, sentinel.shared_key) + self.assertThat(Omshell.remove, MockCallsMatch(*( + call(ip_address) for ip_address in ip_addresses + ))) + + def test_calls_omshell_nullify_lease(self): + ip_addresses = [factory.make_ipv4_address() for _ in range(5)] + dhcp.remove_host_maps(ip_addresses, sentinel.shared_key) + self.assertThat(Omshell.nullify_lease, MockCallsMatch(*( + call(ip_address) for ip_address in ip_addresses + ))) + + def test_raises_error_when_omshell_crashes(self): + error_message = factory.make_name("error").encode("ascii") + Omshell.remove.side_effect = ExternalProcessError( + returncode=2, cmd=("omshell",), output=error_message) + ip_address = factory.make_ipv4_address() + with FakeLogger("maas.dhcp") as logger: + error = self.assertRaises( + exceptions.CannotRemoveHostMap, dhcp.remove_host_maps, + [ip_address], sentinel.shared_key) + # The CannotRemoveHostMap exception includes a message describing the + # problematic mapping. + self.assertDocTestMatches("%s: ..." % ip_address, unicode(error)) + # A message is also written to the maas.dhcp logger that describes the + # problematic mapping. 
+ self.assertDocTestMatches( + "Could not remove host map for ...: ...", + logger.output) + + +class TestStopAndDisableDHCP(MAASTestCase): + """Test how `DHCPServer` subclasses behave when given no subnets.""" + + scenarios = ( + ("DHCPv4", { + "server": dhcp.DHCPv4Server, + "stop_dhcp": (control, "stop_dhcpv4"), # For patching. + "expected_interfaces_file": dhcp.DHCPv4_INTERFACES_FILE, + "expected_config_file": dhcp.DHCPv4_CONFIG_FILE, + }), + ("DHCPv6", { + "server": dhcp.DHCPv6Server, + "stop_dhcp": (control, "stop_dhcpv6"), # For patching. + "expected_interfaces_file": dhcp.DHCPv6_INTERFACES_FILE, + "expected_config_file": dhcp.DHCPv6_CONFIG_FILE, + }), + ) + + def setUp(self): + super(TestStopAndDisableDHCP, self).setUp() + # Avoid trying to actually write a file via sudo. + self.sudo_write_file = self.patch_autospec(dhcp, "sudo_write_file") + # Avoid trying to actually stop a live DHCP server. + self.stop_dhcp = self.patch_autospec(*self.stop_dhcp) + + def test__writes_config_and_stops_dhcp_server(self): + omapi_key = factory.make_name('omapi-key') + server = self.server(omapi_key) + dhcp.configure(server, []) + + self.assertThat(self.sudo_write_file, MockCallsMatch( + call(self.expected_config_file, dhcp.DISABLED_DHCP_SERVER), + call(self.expected_interfaces_file, ""), + )) + self.assertThat(self.stop_dhcp, MockCalledOnceWith()) + + def test__raises_CannotConfigureDHCP_when_config_file_write_fails(self): + # Simulate a failure when writing the configuration file. 
+ self.sudo_write_file.side_effect = ExternalProcessError( + randint(1, 99), [factory.make_name("command")], + factory.make_name("stderr")) + + omapi_key = factory.make_name('omapi-key') + server = self.server(omapi_key) + + self.assertRaises(CannotConfigureDHCP, dhcp.configure, server, []) + + self.assertThat(self.sudo_write_file, MockCalledOnceWith( + self.expected_config_file, dhcp.DISABLED_DHCP_SERVER)) + self.assertThat(self.stop_dhcp, MockNotCalled()) + + def test__raises_CannotStopDHCP_when_stop_fails(self): + # Simulate a failure when stopping the DHCP server. + self.stop_dhcp.side_effect = ExternalProcessError( + randint(1, 99), [factory.make_name("command")], + factory.make_name("stderr")) + + omapi_key = factory.make_name('omapi-key') + server = self.server(omapi_key) + + self.assertRaises(CannotConfigureDHCP, dhcp.configure, server, []) + + self.assertThat(self.sudo_write_file, MockCallsMatch( + call(self.expected_config_file, dhcp.DISABLED_DHCP_SERVER), + call(self.expected_interfaces_file, ""), + )) + self.assertThat(self.stop_dhcp, MockCalledOnceWith()) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_docs.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_docs.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_docs.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_docs.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,75 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Test the documentation of defined commands. + +Specifically, check :py:class:`~twisted.protocols.amp.Command` +subclasses in the MAAS codebase. 
+""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from inspect import getdoc +from itertools import chain +import re + +from maastesting.testcase import MAASTestCase +import provisioningserver.rpc.cluster +import provisioningserver.rpc.common +import provisioningserver.rpc.region +from testtools.matchers import ( + Annotate, + Contains, + MatchesAll, + MatchesRegex, + ) +from twisted.protocols import amp + + +def get_commands(module): + """Return command classes from the given module.""" + for name, value in vars(module).iteritems(): + if isinstance(value, type): + if issubclass(value, amp.Command): + yield value + + +class TestDocs(MAASTestCase): + + scenarios = sorted( + (command.__name__, {"command": command}) + for command in chain( + get_commands(provisioningserver.rpc.common), + get_commands(provisioningserver.rpc.cluster), + get_commands(provisioningserver.rpc.region), + ) + ) + + since_clause_missing_message = ( + "Command class does not have a :since: clause. The version in " + "which this command will be (or already has been) introduced " + "must be recorded, 1.6 for example." + ) + + since_clause_version_not_recognised = ( + "Command's :since: clause does not contain a recognised version, " + "1.6 for example." 
+ ) + + def test_since_clause(self): + contains_since_clause = Annotate( + self.since_clause_missing_message, Contains(":since:")) + since_clause_contains_version = Annotate( + self.since_clause_version_not_recognised, MatchesRegex( + ".*^:since: *[1-9][.][0-9]+$", re.DOTALL | re.MULTILINE)) + self.assertThat(getdoc(self.command), MatchesAll( + contains_since_clause, since_clause_contains_version)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_exceptions.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_exceptions.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_exceptions.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_exceptions.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,38 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for :py:mod:`provisioningserver.rpc.exceptions`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maastesting.testcase import MAASTestCase +from provisioningserver.rpc.exceptions import MultipleFailures +from twisted.python.failure import Failure + + +class TestMultipleFailures(MAASTestCase): + + def test__with_no_failures(self): + exc = MultipleFailures() + self.assertSequenceEqual([], exc.args) + + def test__with_single_failure(self): + errors = [AssertionError()] + failures = [Failure(error) for error in errors] + exc = MultipleFailures(*failures) + self.assertSequenceEqual(failures, exc.args) + + def test__with_multiple_failures(self): + errors = [AssertionError(), ZeroDivisionError()] + failures = [Failure(error) for error in errors] + exc = MultipleFailures(*failures) + self.assertSequenceEqual(failures, exc.args) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_module.py 
maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_module.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_module.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_module.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,39 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for the top-level cluster RPC API.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ +] + +from maastesting.testcase import MAASTestCase +import provisioningserver +from provisioningserver.rpc.exceptions import NoConnectionsAvailable + + +class TestUtilities(MAASTestCase): + + def test_get_rpc_client_returns_client(self): + services = self.patch(provisioningserver, "services") + + client = provisioningserver.rpc.getRegionClient() + self.assertEqual( + services.getServiceNamed('rpc').getClient(), + client, + ) + + def test_error_when_cluster_services_are_down(self): + services = self.patch(provisioningserver, "services") + services.getServiceNamed.side_effect = KeyError + self.assertRaises( + NoConnectionsAvailable, + provisioningserver.rpc.getRegionClient) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_monitors.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_monitors.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_monitors.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_monitors.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,179 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for :py:module:`~provisioningserver.rpc.monitors`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from datetime import ( + datetime, + timedelta, + ) + +from maastesting.factory import factory +from maastesting.matchers import ( + MockCalledOnceWith, + MockNotCalled, + ) +from mock import ( + Mock, + sentinel, + ) +from provisioningserver.rpc import monitors as monitors_module +from provisioningserver.rpc.monitors import ( + cancel_monitor, + running_monitors, + start_monitors, + ) +from provisioningserver.rpc.region import MonitorExpired +from provisioningserver.testing.testcase import PservTestCase +from testtools.matchers import ( + Contains, + Equals, + HasLength, + IsInstance, + Not, + ) +from twisted.internet.base import DelayedCall +from twisted.internet.task import Clock +from twisted.protocols import amp + + +def make_monitors(time_now=None): + """Make some StartMonitors, set to go off one second apart starting in + one second""" + if time_now is None: + time_now = datetime.now(amp.utc) + monitors = [] + for i in xrange(2): + monitors.append({ + "deadline": time_now + timedelta(seconds=i + 1), + "context": factory.make_name("context"), + "id": factory.make_name("id"), + }) + return monitors + + +class TestStartMonitors(PservTestCase): + """Tests for `~provisioningserver.rpc.monitors.start_monitors`.""" + + def tearDown(self): + super(TestStartMonitors, self).tearDown() + for dc, _ in running_monitors.viewvalues(): + if dc.active(): + dc.cancel() + running_monitors.clear() + + def test__sets_up_running_monitors(self): + clock = Clock() + monitors = make_monitors() + start_monitors(monitors, clock) + + self.expectThat(running_monitors, HasLength(len(monitors))) + for monitor in monitors: + id = monitor["id"] + self.expectThat(running_monitors[id], IsInstance(tuple)) + delayed_call, context = running_monitors[id] + self.expectThat(delayed_call, 
IsInstance(DelayedCall)) + self.expectThat(context, Equals(monitor["context"])) + + def test__reschedules_existing_monitor(self): + clock = Clock() + monitor_expired = self.patch_autospec( + monitors_module, "monitor_expired") + monitor_id = factory.make_name("id") + # The first monitor with the ID is scheduled as expected. + monitor1 = { + "deadline": datetime.now(amp.utc) + timedelta(seconds=10), + "context": sentinel.context1, "id": monitor_id, + } + start_monitors([monitor1], clock) + self.expectThat(running_monitors, HasLength(1)) + dc1, context = running_monitors[monitor_id] + self.assertAlmostEqual(dc1.getTime(), 10, delta=1) + self.assertIs(sentinel.context1, context) + self.assertTrue(dc1.active()) + # The second monitor with the ID is also scheduled as expected, taking + # the place of the previous monitor. + monitor2 = { + "deadline": monitor1["deadline"] + timedelta(seconds=10), + "context": sentinel.context2, "id": monitor_id, + } + start_monitors([monitor2], clock) + self.expectThat(running_monitors, HasLength(1)) + dc2, context = running_monitors[monitor_id] + self.assertAlmostEqual(dc2.getTime(), 20, delta=2) + self.assertIs(sentinel.context2, context) + self.assertTrue(dc2.active()) + # However, the first monitor has been cancelled, without calling back + # to the region. + self.assertTrue(dc1.cancelled, "First monitor has not been cancelled") + self.assertThat(monitor_expired, MockNotCalled()) + + def test__removes_from_running_monitors_when_monitor_expires(self): + self.patch(monitors_module, "getRegionClient") + clock = Clock() + monitors = make_monitors() + start_monitors(monitors, clock) + + # Expire the first monitor. + clock.advance(1) + self.assertThat(running_monitors, Not(Contains(monitors[0]["id"]))) + self.assertThat(running_monitors, Contains(monitors[1]["id"])) + + # Expire the other time. 
+ clock.advance(1) + self.assertThat(running_monitors, Not(Contains(monitors[1]["id"]))) + + def test__calls_MonitorExpired_when_monitor_expires(self): + getRegionClient = self.patch(monitors_module, "getRegionClient") + client = Mock() + getRegionClient.return_value = client + clock = Clock() + monitors = make_monitors() + # Just use the first one for this test. + monitor = monitors[0] + start_monitors([monitor], clock) + clock.advance(1) + + self.assertThat( + client, + MockCalledOnceWith( + MonitorExpired, id=monitor["id"], + context=monitor["context"])) + + +class TestCancelMonitor(PservTestCase): + """Tests for `~provisioningserver.rpc.monitors.cancel_monitor`.""" + + def test__cancels_running_monitor(self): + monitors = make_monitors() + clock = Clock() + start_monitors(monitors, clock) + dc, _ = running_monitors[monitors[0]["id"]] + + cancel_monitor(monitors[0]["id"]) + + self.expectThat(running_monitors, Not(Contains(monitors[0]["id"]))) + self.expectThat(running_monitors, Contains(monitors[1]["id"])) + self.assertTrue(dc.cancelled) + + def test__silently_ignores_already_cancelled_monitor(self): + monitors = make_monitors() + clock = Clock() + self.addCleanup(running_monitors.clear) + start_monitors(monitors, clock) + + cancel_monitor(factory.make_string()) + + self.expectThat(running_monitors, Contains(monitors[0]["id"])) + self.expectThat(running_monitors, Contains(monitors[1]["id"])) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_osystems.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_osystems.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_osystems.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_osystems.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,244 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for :py:module:`~provisioningserver.rpc.osystems`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from collections import Iterable +import random + +from maastesting.factory import factory +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import MAASTestCase +from mock import sentinel +from provisioningserver.drivers.osystem import ( + BOOT_IMAGE_PURPOSE, + OperatingSystemRegistry, + ) +from provisioningserver.rpc import ( + exceptions, + osystems, + ) +from provisioningserver.rpc.testing.doubles import StubOS +from provisioningserver.testing.os import make_osystem + + +class TestListOperatingSystemHelpers(MAASTestCase): + + def test_gen_operating_systems_returns_dicts_for_registered_oses(self): + # Patch in some operating systems with some randomised data. See + # StubOS for details of the rules that are used to populate the + # non-random elements. + os1 = StubOS("kermit", [ + ("statler", "Statler"), + ("waldorf", "Waldorf"), + ]) + os2 = StubOS("fozzie", [ + ("swedish-chef", "Swedish-Chef"), + ("beaker", "Beaker"), + ]) + self.patch( + osystems, "OperatingSystemRegistry", + [(os1.name, os1), (os2.name, os2)]) + # The `releases` field in the dict returned is populated by + # gen_operating_system_releases. That's not under test, so we + # mock it. + gen_operating_system_releases = self.patch( + osystems, "gen_operating_system_releases") + gen_operating_system_releases.return_value = sentinel.releases + # The operating systems are yielded in name order. 
+ expected = [ + { + "name": "fozzie", + "title": "Fozzie", + "releases": sentinel.releases, + "default_release": "swedish-chef", + "default_commissioning_release": "beaker", + }, + { + "name": "kermit", + "title": "Kermit", + "releases": sentinel.releases, + "default_release": "statler", + "default_commissioning_release": "waldorf", + }, + ] + observed = osystems.gen_operating_systems() + self.assertIsInstance(observed, Iterable) + self.assertEqual(expected, list(observed)) + + def test_gen_operating_system_releases_returns_dicts_for_releases(self): + # Use an operating system with some randomised data. See StubOS + # for details of the rules that are used to populate the + # non-random elements. + osystem = StubOS("fozzie", [ + ("swedish-chef", "I Am The Swedish-Chef"), + ("beaker", "Beaker The Phreaker"), + ]) + expected = [ + { + "name": "swedish-chef", + "title": "I Am The Swedish-Chef", + "requires_license_key": False, + "can_commission": False, + }, + { + "name": "beaker", + "title": "Beaker The Phreaker", + "requires_license_key": True, + "can_commission": True, + }, + ] + observed = osystems.gen_operating_system_releases(osystem) + self.assertIsInstance(observed, Iterable) + self.assertEqual(expected, list(observed)) + + +class TestGetOSReleaseTitle(MAASTestCase): + + def test_returns_release_title(self): + os_name = factory.make_name('os') + title = factory.make_name('title') + purposes = [BOOT_IMAGE_PURPOSE.XINSTALL] + osystem = make_osystem(self, os_name, purposes) + release = random.choice(osystem.get_supported_releases()) + self.patch(osystem, 'get_release_title').return_value = title + self.assertEqual( + title, osystems.get_os_release_title(osystem.name, release)) + + def test_returns_empty_release_title_when_None_returned(self): + os_name = factory.make_name('os') + purposes = [BOOT_IMAGE_PURPOSE.XINSTALL] + osystem = make_osystem(self, os_name, purposes) + release = random.choice(osystem.get_supported_releases()) + self.patch(osystem, 
'get_release_title').return_value = None + self.assertEqual( + "", osystems.get_os_release_title(osystem.name, release)) + + def test_throws_exception_when_os_does_not_exist(self): + self.assertRaises( + exceptions.NoSuchOperatingSystem, + osystems.get_os_release_title, + factory.make_name("no-such-os"), + factory.make_name("bogus-release")) + + +class TestValidateLicenseKeyErrors(MAASTestCase): + + def test_throws_exception_when_os_does_not_exist(self): + self.assertRaises( + exceptions.NoSuchOperatingSystem, + osystems.validate_license_key, + factory.make_name("no-such-os"), + factory.make_name("bogus-release"), + factory.make_name("key-to-not-much")) + + +class TestValidateLicenseKey(MAASTestCase): + + def test_validates_key(self): + os_name = factory.make_name('os') + purposes = [BOOT_IMAGE_PURPOSE.XINSTALL] + osystem = make_osystem(self, os_name, purposes) + release = random.choice(osystem.get_supported_releases()) + os_specific_validate_license_key = self.patch( + osystem, "validate_license_key") + osystems.validate_license_key( + osystem.name, release, sentinel.key) + self.assertThat( + os_specific_validate_license_key, + MockCalledOnceWith(release, sentinel.key)) + + +class TestGetPreseedDataErrors(MAASTestCase): + + def test_throws_exception_when_os_does_not_exist(self): + self.assertRaises( + exceptions.NoSuchOperatingSystem, + osystems.get_preseed_data, factory.make_name("no-such-os"), + sentinel.preseed_type, sentinel.node_system_id, + sentinel.node_hostname, sentinel.consumer_key, + sentinel.token_key, sentinel.token_secret, + sentinel.metadata_url) + + +class TestGetPreseedData(MAASTestCase): + + # Check for every OS. + scenarios = [ + (osystem.name, {"osystem": osystem}) + for _, osystem in OperatingSystemRegistry + ] + + def test_get_preseed_data_calls_compose_preseed(self): + # get_preseed_data() calls compose_preseed() on the + # OperatingSystem instances. 
+ os_specific_compose_preseed = self.patch( + self.osystem, "compose_preseed") + metadata_url = factory.make_parsed_url() + osystems.get_preseed_data( + self.osystem.name, sentinel.preseed_type, + sentinel.node_system_id, sentinel.node_hostname, + sentinel.consumer_key, sentinel.token_key, + sentinel.token_secret, metadata_url) + self.assertThat( + os_specific_compose_preseed, + MockCalledOnceWith( + sentinel.preseed_type, + (sentinel.node_system_id, sentinel.node_hostname), + (sentinel.consumer_key, sentinel.token_key, + sentinel.token_secret), + metadata_url.geturl())) + + +class TestComposeCurtinNetworkPreseed(MAASTestCase): + + def make_args(self): + mac = factory.make_mac_address() + ipv4_net = factory.make_ipv4_network() + ipv4_addr = factory.pick_ip_in_network(ipv4_net) + ipv6_net = factory.make_ipv6_network() + ipv6_addr = factory.pick_ip_in_network(ipv6_net) + return { + 'interfaces': [(factory.make_name('eth'), mac)], + 'auto_interfaces': [mac], + 'ips_mapping': {mac: [ipv4_addr, ipv6_addr]}, + 'gateways_mapping': { + mac: [ + factory.make_ipv4_address(), + factory.make_ipv6_address(), + ], + }, + 'disable_ipv4': factory.pick_bool(), + 'nameservers': [factory.make_ipv6_address()], + 'netmasks': { + ipv4_addr: unicode(ipv4_net.netmask), + ipv6_addr: unicode(ipv6_net.netmask), + }, + } + + def test__forwards_to_OS_implementation(self): + args = self.make_args() + for os_name, osystem in OperatingSystemRegistry: + fake = self.patch(osystem, 'compose_curtin_network_preseed') + fake.return_value = factory.make_name('preseed-%s' % os_name) + self.assertEqual( + fake.return_value, + osystems.compose_curtin_network_preseed(os_name, **args)) + + def test__works_with_real_implementation(self): + ubuntu = OperatingSystemRegistry['ubuntu'] + args = self.make_args() + self.assertEqual( + ubuntu.compose_curtin_network_preseed(**args), + osystems.compose_curtin_network_preseed('ubuntu', **args)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_power.py 
maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_power.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_power.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_power.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,929 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for :py:module:`~provisioningserver.rpc.power`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import logging +import random + +from fixtures import FakeLogger +from maastesting.factory import factory +from maastesting.matchers import ( + MockCalledOnceWith, + MockCallsMatch, + MockNotCalled, + ) +from maastesting.testcase import ( + MAASTestCase, + MAASTwistedRunTest, + ) +from mock import ( + ANY, + call, + DEFAULT, + Mock, + sentinel, + ) +import provisioningserver +from provisioningserver.events import EVENT_TYPES +from provisioningserver.power.poweraction import PowerActionFail +from provisioningserver.rpc import ( + exceptions, + power, + region, + ) +from provisioningserver.rpc.testing import ( + always_succeed_with, + MockClusterToRegionRPCFixture, + MockLiveClusterToRegionRPCFixture, + TwistedLoggerFixture, + ) +from testtools import ExpectedException +from testtools.deferredruntest import ( + assert_fails_with, + extract_result, + ) +from testtools.matchers import IsInstance +from twisted.internet import reactor +from twisted.internet.defer import ( + Deferred, + fail, + inlineCallbacks, + maybeDeferred, + returnValue, + succeed, + ) +from twisted.internet.task import Clock + + +def patch_power_action(test, return_value=DEFAULT, side_effect=None): + """Patch the PowerAction object. 
+ + Patch the PowerAction object so that PowerAction().execute + is replaced by a Mock object created using the given `return_value` + and `side_effect`. + + This can be used to simulate various successes or failures patterns + while manipulating the power state of a node. + + Returns a tuple of mock objects: power.PowerAction and + power.PowerAction().execute. + """ + power_action_obj = Mock() + power_action_obj_execute = Mock( + return_value=return_value, side_effect=side_effect) + power_action_obj.execute = power_action_obj_execute + power_action = test.patch(power, 'PowerAction') + power_action.return_value = power_action_obj + return power_action, power_action_obj_execute + + +class TestPowerHelpers(MAASTestCase): + + def patch_rpc_methods(self): + fixture = self.useFixture(MockClusterToRegionRPCFixture()) + protocol, io = fixture.makeEventLoop( + region.MarkNodeFailed, region.UpdateNodePowerState, + region.SendEvent) + return protocol, io + + def test_power_change_success_emits_event(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_change = 'on' + protocol, io = self.patch_rpc_methods() + d = power.power_change_success(system_id, hostname, power_change) + io.flush() + self.assertThat( + protocol.UpdateNodePowerState, + MockCalledOnceWith( + ANY, + system_id=system_id, + power_state=power_change) + ) + self.assertThat( + protocol.SendEvent, + MockCalledOnceWith( + ANY, + type_name=EVENT_TYPES.NODE_POWERED_ON, + system_id=system_id, + description='') + ) + self.assertIsNone(extract_result(d)) + + def test_power_change_starting_emits_event(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_change = 'on' + protocol, io = self.patch_rpc_methods() + d = power.power_change_starting(system_id, hostname, power_change) + io.flush() + self.assertThat( + protocol.SendEvent, + MockCalledOnceWith( + ANY, + type_name=EVENT_TYPES.NODE_POWER_ON_STARTING, + 
system_id=system_id, + description='') + ) + self.assertIsNone(extract_result(d)) + + def test_power_change_failure_emits_event(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + message = factory.make_name('message') + power_change = 'on' + protocol, io = self.patch_rpc_methods() + d = power.power_change_failure( + system_id, hostname, power_change, message) + io.flush() + self.assertThat( + protocol.SendEvent, + MockCalledOnceWith( + ANY, + type_name=EVENT_TYPES.NODE_POWER_ON_FAILED, + system_id=system_id, + description=message) + ) + self.assertIsNone(extract_result(d)) + + def test_power_query_failure_emits_event(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + message = factory.make_name('message') + protocol, io = self.patch_rpc_methods() + d = power.power_query_failure( + system_id, hostname, message) + # This blocks until the deferred is complete + io.flush() + self.assertIsNone(extract_result(d)) + self.assertThat( + protocol.SendEvent, + MockCalledOnceWith( + ANY, + type_name=EVENT_TYPES.NODE_POWER_QUERY_FAILED, + system_id=system_id, + description=message) + ) + + def test_power_state_update_calls_UpdateNodePowerState(self): + system_id = factory.make_name('system_id') + state = random.choice(['on', 'off']) + protocol, io = self.patch_rpc_methods() + d = power.power_state_update( + system_id, state) + # This blocks until the deferred is complete + io.flush() + self.assertIsNone(extract_result(d)) + self.assertThat( + protocol.UpdateNodePowerState, + MockCalledOnceWith( + ANY, + system_id=system_id, + power_state=state) + ) + + +class TestChangePowerState(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + @inlineCallbacks + def patch_rpc_methods(self, return_value={}, side_effect=None): + fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) + protocol, connecting = fixture.makeEventLoop( + region.MarkNodeFailed, 
region.UpdateNodePowerState, + region.SendEvent) + protocol.MarkNodeFailed.return_value = return_value + protocol.MarkNodeFailed.side_effect = side_effect + self.addCleanup((yield connecting)) + returnValue(protocol.MarkNodeFailed) + + def test_change_power_state_calls_power_change_starting_early_on(self): + # The first, or one of the first, things that change_power_state() + # does is write to the node event log via power_change_starting(). + + class ArbitraryException(Exception): + """This allows us to return early from a function.""" + + # Raise this exception when power_change_starting() is called, to + # return early from change_power_state(). This lets us avoid set-up + # for parts of the function that we're presently not interested in. + self.patch_autospec(power, "power_change_starting") + power.power_change_starting.side_effect = ArbitraryException() + + d = power.change_power_state( + sentinel.system_id, sentinel.hostname, sentinel.power_type, + sentinel.power_change, sentinel.context) + self.assertRaises(ArbitraryException, extract_result, d) + self.assertThat( + power.power_change_starting, MockCalledOnceWith( + sentinel.system_id, sentinel.hostname, sentinel.power_change)) + + @inlineCallbacks + def test_change_power_state_changes_power_state(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + power_change = random.choice(['on', 'off']) + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + self.patch(power, 'pause') + power.power_action_registry[system_id] = power_change + # Patch the power action utility so that it says the node is + # in the required power state. 
+ power_action, execute = patch_power_action( + self, return_value=power_change) + markNodeBroken = yield self.patch_rpc_methods() + + yield power.change_power_state( + system_id, hostname, power_type, power_change, context) + self.assertThat( + execute, + MockCallsMatch( + # One call to change the power state. + call(power_change=power_change, **context), + # One call to query the power state. + call(power_change='query', **context), + ), + ) + # The node hasn't been marked broken. + self.assertThat(markNodeBroken, MockNotCalled()) + + @inlineCallbacks + def test_change_power_state_doesnt_retry_for_certain_power_types(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + # Use a power type that is not among power.QUERY_POWER_TYPES. + power_type = factory.make_name('power_type') + power_change = random.choice(['on', 'off']) + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + power.power_action_registry[system_id] = power_change + self.patch(power, 'pause') + power_action, execute = patch_power_action( + self, return_value=random.choice(['on', 'off'])) + markNodeBroken = yield self.patch_rpc_methods() + + yield power.change_power_state( + system_id, hostname, power_type, power_change, context) + self.assertThat( + execute, + MockCallsMatch( + # Only one call to change the power state. + call(power_change=power_change, **context), + ), + ) + # The node hasn't been marked broken. 
+ self.assertThat(markNodeBroken, MockNotCalled()) + + @inlineCallbacks + def test_change_power_state_retries_if_power_state_doesnt_change(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + power_change = 'on' + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + self.patch(power, 'pause') + power.power_action_registry[system_id] = power_change + # Simulate a failure to power up the node, then a success. + power_action, execute = patch_power_action( + self, side_effect=[None, 'off', None, 'on']) + markNodeBroken = yield self.patch_rpc_methods() + + yield power.change_power_state( + system_id, hostname, power_type, power_change, context) + self.assertThat( + execute, + MockCallsMatch( + call(power_change=power_change, **context), + call(power_change='query', **context), + call(power_change=power_change, **context), + call(power_change='query', **context), + ) + ) + # The node hasn't been marked broken. + self.assertThat(markNodeBroken, MockNotCalled()) + + @inlineCallbacks + def test_change_power_state_doesnt_retry_if_query_returns_unknown(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + power_change = random.choice(['on', 'off']) + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + self.patch(power, 'pause') + power.power_action_registry[system_id] = power_change + # Patch the power action utility so that it says the node is + # in the required power state. + power_action, execute = patch_power_action( + self, return_value="unknown") + markNodeBroken = yield self.patch_rpc_methods() + + yield power.change_power_state( + system_id, hostname, power_type, power_change, context) + self.assertThat( + execute, + MockCallsMatch( + # One call to change the power state. 
+ call(power_change=power_change, **context), + # One call to query the power state. + call(power_change='query', **context), + ), + ) + # The node hasn't been marked broken. + self.assertThat(markNodeBroken, MockNotCalled()) + + @inlineCallbacks + def test_change_power_state_marks_the_node_broken_if_failure(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + power_change = 'on' + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + self.patch(power, 'pause') + power.power_action_registry[system_id] = power_change + # Simulate a persistent failure. + power_action, execute = patch_power_action( + self, return_value='off') + markNodeBroken = yield self.patch_rpc_methods() + + yield power.change_power_state( + system_id, hostname, power_type, power_change, context) + + # The node has been marked broken. + msg = "Timeout after %s tries" % len( + power.default_waiting_policy) + self.assertThat( + markNodeBroken, + MockCalledOnceWith( + ANY, + system_id=system_id, + error_description=msg) + ) + + @inlineCallbacks + def test_change_power_state_marks_the_node_broken_if_exception(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + power_change = 'on' + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + self.patch(power, 'pause') + power.power_action_registry[system_id] = power_change + # Simulate an exception. 
+ exception_message = factory.make_name('exception') + power_action, execute = patch_power_action( + self, side_effect=PowerActionFail(exception_message)) + markNodeBroken = yield self.patch_rpc_methods() + + with ExpectedException(PowerActionFail): + yield power.change_power_state( + system_id, hostname, power_type, power_change, context) + + error_message = "Node could not be powered on: %s" % exception_message + self.assertThat( + markNodeBroken, MockCalledOnceWith( + ANY, system_id=system_id, error_description=error_message)) + + @inlineCallbacks + def test_change_power_state_pauses_inbetween_retries(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + power_change = 'on' + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + power.power_action_registry[system_id] = power_change + # Simulate two failures to power up the node, then a success. + power_action, execute = patch_power_action( + self, side_effect=[None, 'off', None, 'off', None, 'on']) + # Patch calls to pause() to `execute` so that we record both in the + # same place, and can thus see ordering. 
+ self.patch(power, 'pause', execute) + + yield self.patch_rpc_methods() + + yield power.change_power_state( + system_id, hostname, power_type, power_change, context) + + self.assertThat(execute, MockCallsMatch( + call(power_change=power_change, **context), + call(1, reactor), # pause(1, reactor) + call(power_change='query', **context), + call(power_change=power_change, **context), + call(2, reactor), # pause(1, reactor) + call(power_change='query', **context), + )) + + +class TestPowerQuery(MAASTestCase): + + def setUp(self): + super(TestPowerQuery, self).setUp() + self.patch( + provisioningserver.rpc.power, 'deferToThread', maybeDeferred) + + def patch_rpc_methods(self, return_value={}, side_effect=None): + fixture = self.useFixture(MockClusterToRegionRPCFixture()) + protocol, io = fixture.makeEventLoop( + region.MarkNodeFailed, region.SendEvent, + region.UpdateNodePowerState) + protocol.MarkNodeFailed.return_value = return_value + protocol.MarkNodeFailed.side_effect = side_effect + return protocol.SendEvent, protocol.MarkNodeFailed, io + + def test_get_power_state_querys_node(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + power_state = random.choice(['on', 'off']) + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + self.patch(power, 'pause') + # Patch the power action utility so that it says the node is + # in on/off power state. + power_action, execute = patch_power_action( + self, return_value=power_state) + _, markNodeBroken, io = self.patch_rpc_methods() + + d = power.get_power_state( + system_id, hostname, power_type, context) + # This blocks until the deferred is complete + io.flush() + self.assertEqual(power_state, extract_result(d)) + self.assertThat( + execute, + MockCallsMatch( + # One call to change the power state. 
+ call(power_change='query', **context), + ), + ) + + def test_get_power_state_returns_unknown_for_certain_power_types(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + # Use a power type that is not among power.QUERY_POWER_TYPES. + power_type = factory.make_name('power_type') + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + _, _, io = self.patch_rpc_methods() + + d = power.get_power_state( + system_id, hostname, power_type, context) + + return assert_fails_with(d, PowerActionFail) + + def test_get_power_state_retries_if_power_query_fails(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + power_state = random.choice(['on', 'off']) + err_msg = factory.make_name('error') + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + self.patch(power, 'pause') + # Simulate a failure to power query the node, then success. + power_action, execute = patch_power_action( + self, side_effect=[PowerActionFail(err_msg), power_state]) + sendEvent, markNodeBroken, io = self.patch_rpc_methods() + + d = power.get_power_state( + system_id, hostname, power_type, context) + # This blocks until the deferred is complete + io.flush() + self.assertEqual(power_state, extract_result(d)) + self.assertThat( + execute, + MockCallsMatch( + call(power_change='query', **context), + call(power_change='query', **context), + ) + ) + # The node hasn't been marked broken. 
+ self.assertThat(markNodeBroken, MockNotCalled()) + + def test_get_power_state_changes_power_state_if_failure(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + err_msg = factory.make_name('error') + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + self.patch(power, 'pause') + power_state_update = self.patch_autospec(power, 'power_state_update') + + # Simulate a persistent failure. + power_action, execute = patch_power_action( + self, side_effect=PowerActionFail(err_msg)) + _, _, io = self.patch_rpc_methods() + + d = power.get_power_state( + system_id, hostname, power_type, context) + io.flush() + d.addCallback(self.fail) + + error = self.assertRaises(PowerActionFail, extract_result, d) + self.assertEqual(err_msg, unicode(error)) + self.assertThat( + power_state_update, MockCalledOnceWith(system_id, 'error')) + + def test_get_power_state_changes_power_state_if_success(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_state = random.choice(['on', 'off']) + power_type = random.choice(power.QUERY_POWER_TYPES) + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + self.patch(power, 'pause') + power_state_update = self.patch_autospec(power, 'power_state_update') + + # Simulate success. 
+ power_action, execute = patch_power_action( + self, return_value=power_state) + _, _, io = self.patch_rpc_methods() + + d = power.get_power_state( + system_id, hostname, power_type, context) + io.flush() + self.assertEqual(power_state, extract_result(d)) + self.assertThat( + power_state_update, MockCalledOnceWith(system_id, power_state)) + + def test_get_power_state_changes_power_state_if_unknown(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_state = "unknown" + power_type = random.choice(power.QUERY_POWER_TYPES) + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + self.patch(power, 'pause') + power_state_update = self.patch_autospec(power, 'power_state_update') + + # Simulate success. + power_action, execute = patch_power_action( + self, return_value=power_state) + _, _, io = self.patch_rpc_methods() + + d = power.get_power_state( + system_id, hostname, power_type, context) + io.flush() + self.assertEqual(power_state, extract_result(d)) + self.assertThat( + power_state_update, MockCalledOnceWith(system_id, power_state)) + + def test_get_power_state_pauses_inbetween_retries(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + # Simulate two failures to power up the node, then a success. 
+ power_action, execute = patch_power_action( + self, side_effect=[PowerActionFail, PowerActionFail, 'off']) + self.patch(power, "deferToThread", maybeDeferred) + _, _, io = self.patch_rpc_methods() + clock = Clock() + + calls_and_pause = [ + ([ + call(power_change='query', **context), + ], 3), + ([ + call(power_change='query', **context), + ], 5), + ([ + call(power_change='query', **context), + ], 10), + ] + calls = [] + d = power.get_power_state( + system_id, hostname, power_type, context, clock=clock) + for newcalls, waiting_time in calls_and_pause: + calls.extend(newcalls) + # This blocks until the deferred is complete + io.flush() + self.assertThat(execute, MockCallsMatch(*calls)) + clock.advance(waiting_time) + self.assertEqual("off", extract_result(d)) + + +class TestPowerQueryAsync(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def make_node(self, power_type=None): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + if power_type is None: + power_type = random.choice(power.QUERY_POWER_TYPES) + state = random.choice(['on', 'off', 'unknown', 'error']) + context = { + factory.make_name('context-key'): ( + factory.make_name('context-val')) + } + return { + 'context': context, + 'hostname': hostname, + 'power_state': state, + 'power_type': power_type, + 'system_id': system_id, + } + + def make_nodes(self, count=3): + nodes = [self.make_node() for _ in xrange(count)] + # Sanity check that these nodes are something that can emerge + # from a call to ListNodePowerParameters. + region.ListNodePowerParameters.makeResponse({"nodes": nodes}, None) + return nodes + + def pick_alternate_state(self, state): + return random.choice([ + value for value in ['on', 'off', 'unknown', 'error'] + if value != state]) + + @inlineCallbacks + def test_query_all_nodes_calls_get_power_state(self): + nodes = self.make_nodes() + # Report back that all nodes' power states are as recorded. 
+ power_states = [node['power_state'] for node in nodes] + get_power_state = self.patch(power, 'get_power_state') + get_power_state.side_effect = [ + succeed(power_state) + for power_state in power_states + ] + + yield power.query_all_nodes(nodes) + self.assertThat(get_power_state, MockCallsMatch(*( + call( + node['system_id'], node['hostname'], + node['power_type'], node['context'], + clock=reactor) + for node in nodes + ))) + + @inlineCallbacks + def test_query_all_nodes_only_queries_queryable_power_types(self): + nodes = self.make_nodes() + # nodes are all queryable, so add one that isn't: + nodes.append(self.make_node(power_type='ether_wake')) + + # Report back that all nodes' power states are as recorded. + power_states = [node['power_state'] for node in nodes] + get_power_state = self.patch(power, 'get_power_state') + get_power_state.side_effect = [ + succeed(power_state) + for power_state in power_states + ] + + yield power.query_all_nodes(nodes) + self.assertThat(get_power_state, MockCallsMatch(*( + call( + node['system_id'], node['hostname'], + node['power_type'], node['context'], + clock=reactor) + for node in nodes + if node['power_type'] in power.QUERY_POWER_TYPES + ))) + + @inlineCallbacks + def test_query_all_nodes_swallows_PowerActionFail(self): + node1, node2 = self.make_nodes(2) + new_state_2 = self.pick_alternate_state(node2['power_state']) + get_power_state = self.patch(power, 'get_power_state') + error_msg = factory.make_name("error") + get_power_state.side_effect = [ + fail(PowerActionFail(error_msg)), succeed(new_state_2)] + + with FakeLogger("maas.power", level=logging.DEBUG) as maaslog: + yield power.query_all_nodes([node1, node2]) + + self.assertDocTestMatches( + """\ + hostname-...: Failed to query power state: %s. + hostname-...: Power state has changed from ... to ... 
+ """ % error_msg, + maaslog.output) + + @inlineCallbacks + def test_query_all_nodes_swallows_NoSuchNode(self): + node1, node2 = self.make_nodes(2) + new_state_2 = self.pick_alternate_state(node2['power_state']) + get_power_state = self.patch(power, 'get_power_state') + get_power_state.side_effect = [ + fail(exceptions.NoSuchNode()), succeed(new_state_2)] + + with FakeLogger("maas.power", level=logging.DEBUG) as maaslog: + yield power.query_all_nodes([node1, node2]) + + self.assertDocTestMatches( + """\ + hostname-...: Could not update power status; no such node. + hostname-...: Power state has changed from ... to ... + """, + maaslog.output) + + @inlineCallbacks + def test_query_all_nodes_swallows_Exception(self): + node1, node2 = self.make_nodes(2) + new_state_2 = self.pick_alternate_state(node2['power_state']) + get_power_state = self.patch(power, 'get_power_state') + get_power_state.side_effect = [ + fail(Exception('unknown')), succeed(new_state_2)] + + with FakeLogger("maas.power", level=logging.DEBUG) as maaslog: + yield power.query_all_nodes([node1, node2]) + + self.assertDocTestMatches( + """\ + hostname-...: Failed to query power state, unknown error: unknown + hostname-...: Power state has changed from ... to ... 
+ """, + maaslog.output) + + @inlineCallbacks + def test_query_all_nodes_returns_deferredlist_of_number_of_nodes(self): + node1, node2 = self.make_nodes(2) + get_power_state = self.patch(power, 'get_power_state') + get_power_state.side_effect = [ + succeed(node1['power_state']), succeed(node2['power_state'])] + + results = yield power.query_all_nodes([node1, node2]) + self.assertEqual( + [(True, node1['power_state']), (True, node2['power_state'])], + results) + + +class TestMaybeChangePowerState(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def patch_power_action_registry(self): + self.patch(power, 'power_action_registry', {}) + + def patch_methods_using_rpc(self): + self.patch_autospec(power, 'power_change_starting') + power.power_change_starting.side_effect = always_succeed_with(None) + + self.patch_autospec(power, 'change_power_state') + power.change_power_state.side_effect = always_succeed_with(None) + + def test_always_returns_deferred(self): + clock = Clock() + d = power.maybe_change_power_state( + sentinel.system_id, sentinel.hostname, sentinel.power_type, + random.choice(("on", "off")), sentinel.context, clock=clock) + self.assertThat(d, IsInstance(Deferred)) + + @inlineCallbacks + def test_adds_action_to_registry(self): + self.patch_methods_using_rpc() + self.patch_power_action_registry() + + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + power_change = random.choice(['on', 'off']) + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + + yield power.maybe_change_power_state( + system_id, hostname, power_type, power_change, context) + self.assertEqual( + {system_id: power_change}, + power.power_action_registry) + reactor.runUntilCurrent() # Run all delayed calls. 
+ self.assertEqual({}, power.power_action_registry) + + @inlineCallbacks + def test_errors_when_change_already_registered(self): + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + power_change = random.choice(['on', 'off']) + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + + power.power_action_registry[system_id] = power_change + with ExpectedException(exceptions.PowerActionAlreadyInProgress): + yield power.maybe_change_power_state( + system_id, hostname, power_type, power_change, context) + + @inlineCallbacks + def test_calls_change_power_state_later(self): + self.patch_methods_using_rpc() + + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + power_change = random.choice(['on', 'off']) + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + + yield power.maybe_change_power_state( + system_id, hostname, power_type, power_change, context) + reactor.runUntilCurrent() # Run all delayed calls. + self.assertThat( + power.change_power_state, + MockCalledOnceWith( + system_id, hostname, power_type, power_change, context, + power.reactor)) + + @inlineCallbacks + def test_clears_lock_if_change_power_state_success(self): + self.patch_power_action_registry() + self.patch_methods_using_rpc() + + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + power_change = random.choice(['on', 'off']) + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + + yield power.maybe_change_power_state( + system_id, hostname, power_type, power_change, context) + reactor.runUntilCurrent() # Run all delayed calls. 
+ self.assertNotIn(system_id, power.power_action_registry) + + @inlineCallbacks + def test_clears_lock_if_change_power_state_fails(self): + self.patch_power_action_registry() + + class TestException(Exception): + pass + + self.patch_autospec(power, 'power_change_starting') + power.power_change_starting.side_effect = TestException('boom') + + system_id = factory.make_name('system_id') + hostname = sentinel.hostname + power_type = sentinel.power_type + power_change = random.choice(['on', 'off']) + context = sentinel.context + + logger = self.useFixture(TwistedLoggerFixture()) + + yield power.maybe_change_power_state( + system_id, hostname, power_type, power_change, context) + reactor.runUntilCurrent() # Run all delayed calls. + self.assertNotIn(system_id, power.power_action_registry) + self.assertDocTestMatches( + "Unhandled Error...TestException: boom", + logger.dump()) + + @inlineCallbacks + def test__calls_change_power_state_with_timeout(self): + self.patch_power_action_registry() + self.patch_methods_using_rpc() + defer_with_timeout = self.patch(power, 'deferWithTimeout') + + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + power_type = random.choice(power.QUERY_POWER_TYPES) + power_change = random.choice(['on', 'off']) + context = { + factory.make_name('context-key'): factory.make_name('context-val') + } + + yield power.maybe_change_power_state( + system_id, hostname, power_type, power_change, context) + reactor.runUntilCurrent() # Run all delayed calls. 
+ self.assertThat( + defer_with_timeout, MockCalledOnceWith( + power.CHANGE_POWER_STATE_TIMEOUT, + power.change_power_state, system_id, hostname, + power_type, power_change, context, power.reactor)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_tags.py maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_tags.py --- maas-1.5.4+bzr2294/src/provisioningserver/rpc/tests/test_tags.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/rpc/tests/test_tags.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,73 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for :py:module:`~provisioningserver.rpc.dhcp`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from apiclient.maas_client import ( + MAASClient, + MAASDispatcher, + MAASOAuth, + ) +from maastesting.factory import factory +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import MAASTestCase +from mock import ( + ANY, + sentinel, + ) +from provisioningserver.rpc import tags + + +class TestEvaluateTag(MAASTestCase): + + def setUp(self): + super(TestEvaluateTag, self).setUp() + get_maas_url = self.patch_autospec(tags, "get_maas_url") + get_maas_url.return_value = sentinel.maas_url + get_cluster_uuid = self.patch_autospec(tags, "get_cluster_uuid") + get_cluster_uuid.return_value = sentinel.cluster_uuid + + def test__calls_process_node_tags(self): + credentials = "aaa", "bbb", "ccc" + process_node_tags = self.patch_autospec(tags, "process_node_tags") + tags.evaluate_tag( + sentinel.tag_name, sentinel.tag_definition, sentinel.tag_nsmap, + credentials) + self.assertThat( + process_node_tags, MockCalledOnceWith( + tag_name=sentinel.tag_name, + tag_definition=sentinel.tag_definition, + tag_nsmap=sentinel.tag_nsmap, client=ANY, + 
nodegroup_uuid=sentinel.cluster_uuid)) + + def test__constructs_client_with_credentials(self): + consumer_key = factory.make_name("ckey") + resource_token = factory.make_name("rtok") + resource_secret = factory.make_name("rsec") + credentials = consumer_key, resource_token, resource_secret + + self.patch_autospec(tags, "process_node_tags") + self.patch_autospec(tags, "MAASOAuth").side_effect = MAASOAuth + + tags.evaluate_tag( + sentinel.tag_name, sentinel.tag_definition, sentinel.tag_nsmap, + credentials) + + client = tags.process_node_tags.call_args[1]["client"] + self.assertIsInstance(client, MAASClient) + self.assertEqual(sentinel.maas_url, client.url) + self.assertIsInstance(client.dispatcher, MAASDispatcher) + self.assertIsInstance(client.auth, MAASOAuth) + self.assertThat(tags.MAASOAuth, MockCalledOnceWith( + consumer_key, resource_token, resource_secret)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/security.py maas-1.7.6+bzr3376/src/provisioningserver/security.py --- maas-1.5.4+bzr2294/src/provisioningserver/security.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/security.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,190 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Cluster security code.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + "calculate_digest", + "get_shared_secret_filesystem_path", + "get_shared_secret_from_filesystem", +] + +from binascii import ( + a2b_hex, + b2a_hex, + ) +import errno +from hashlib import sha256 +from hmac import HMAC +from os import fchmod +from os.path import dirname +from sys import ( + stderr, + stdin, + ) + +from lockfile import FileLock +from provisioningserver.path import get_path +from provisioningserver.utils.fs import ( + ensure_dir, + read_text_file, + write_text_file, + ) + + +def to_hex(b): + """Convert byte string to hex encoding.""" + assert isinstance(b, bytes), "%r is not a byte string" % (b,) + return b2a_hex(b).decode("ascii") + + +def to_bin(u): + """Convert ASCII-only unicode string to hex encoding.""" + assert isinstance(u, unicode), "%r is not a unicode string" % (u,) + # Strip ASCII whitespace from u before converting. + return a2b_hex(u.encode("ascii").strip()) + + +def get_shared_secret_filesystem_path(): + """Return the path to shared-secret on the filesystem.""" + return get_path("var", "lib", "maas", "secret") + + +def get_shared_secret_from_filesystem(): + """Load the secret from the filesystem. + + `get_shared_secret_filesystem_path` defines where the file will be + written. If the directory does not already exist, this will attempt to + create it, including all parent directories. + + :return: A byte string of arbitrary length. + """ + secret_path = get_shared_secret_filesystem_path() + ensure_dir(dirname(secret_path)) + with FileLock(secret_path): + # Load secret from the filesystem, if it exists. + try: + secret_hex = read_text_file(secret_path) + except IOError as e: + if e.errno == errno.ENOENT: + return None + else: + raise + else: + return to_bin(secret_hex) + + +def set_shared_secret_on_filesystem(secret): + """Write the secret to the filesystem. 
+ + `get_shared_secret_filesystem_path` defines where the file will be + written. If the directory does not already exist, this will attempt to + create it, including all parent directories. + + :type secret: A byte string of arbitrary length. + """ + secret_path = get_shared_secret_filesystem_path() + ensure_dir(dirname(secret_path)) + secret_hex = to_hex(secret) + with FileLock(secret_path): + # Ensure that the file has sensible permissions. + with open(secret_path, "ab") as secret_f: + fchmod(secret_f.fileno(), 0o640) + # Write secret to the filesystem. + write_text_file(secret_path, secret_hex) + + +def calculate_digest(secret, message, salt): + """Calculate a SHA-256 HMAC digest for the given data.""" + assert isinstance(secret, bytes), "%r is not a byte string." % (secret,) + assert isinstance(message, bytes), "%r is not byte string." % (message,) + assert isinstance(salt, bytes), "%r is not a byte string." % (salt,) + hmacr = HMAC(secret, digestmod=sha256) + hmacr.update(message) + hmacr.update(salt) + return hmacr.digest() + + +class InstallSharedSecretScript: + """Install a shared-secret onto a cluster. + + This class conforms to the contract that :py:func:`MainScript.register` + requires. + """ + + @staticmethod + def add_arguments(parser): + """Initialise options for storing a shared-secret. + + :param parser: An instance of :class:`ArgumentParser`. + """ + + @staticmethod + def run(args): + """Install a shared-secret to this cluster. + + When invoked interactively, you'll be prompted to enter the secret. + Otherwise the secret will be read from the first line of stdin. + + In both cases, the secret must be hex/base16 encoded. + """ + # Obtain the secret from the invoker. + if stdin.isatty(): + try: + secret_hex = raw_input("Secret (hex/base16 encoded): ") + except EOFError: + print() # So that the shell prompt appears on the next line. + raise SystemExit(1) + except KeyboardInterrupt: + print() # So that the shell prompt appears on the next line. 
+ raise + else: + secret_hex = stdin.readline() + # Decode and install the secret. + try: + secret = secret_hex.strip().decode("hex") + except TypeError as error: + print("Secret could not be decoded:", unicode(error), file=stderr) + raise SystemExit(1) + else: + set_shared_secret_on_filesystem(secret) + shared_secret_path = get_shared_secret_filesystem_path() + print("Secret installed to %s." % shared_secret_path) + raise SystemExit(0) + + +class CheckForSharedSecretScript: + """Check for the presence of a shared-secret on a cluster. + + This class conforms to the contract that :py:func:`MainScript.register` + requires. + """ + + @staticmethod + def add_arguments(parser): + """Initialise options for checking the presence of a shared-secret. + + :param parser: An instance of :class:`ArgumentParser`. + """ + + @staticmethod + def run(args): + """Check for the presence of a shared-secret on this cluster. + + Exits 0 (zero) if a shared-secret has been installed. + """ + if get_shared_secret_from_filesystem() is None: + print("Shared-secret is NOT installed.") + raise SystemExit(1) + else: + print("Shared-secret is installed.") + raise SystemExit(0) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/services.py maas-1.7.6+bzr3376/src/provisioningserver/services.py --- maas-1.5.4+bzr2294/src/provisioningserver/services.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/services.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,109 +0,0 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Additional services that compose the MAAS Provisioning Server.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - "LogService", - "OOPSService", - ] - -import signal -import sys - -import oops -from oops_datedir_repo import DateDirRepo -from oops_twisted import ( - Config as oops_config, - defer_publisher, - OOPSObserver, - ) -from twisted.application.service import Service -from twisted.internet import reactor -from twisted.python.log import ( - addObserver, - FileLogObserver, - removeObserver, - ) -from twisted.python.logfile import LogFile - - -class LogService(Service): - - name = "log" - - def __init__(self, filename): - self.filename = filename - self.logfile = None - self.observer = None - - def _signal_handler(self, sig, frame): - reactor.callFromThread(self.logfile.reopen) - - def startService(self): - Service.startService(self) - if self.filename != '-': - self.logfile = LogFile.fromFullPath( - self.filename, rotateLength=None, defaultMode=0o644) - self.__previous_signal_handler = signal.signal( - signal.SIGUSR1, self._signal_handler) - else: - self.logfile = sys.stdout - self.observer = FileLogObserver(self.logfile) - self.observer.start() - - def stopService(self): - Service.stopService(self) - if self.filename != '-': - signal.signal(signal.SIGUSR1, self.__previous_signal_handler) - del self.__previous_signal_handler - self.observer.stop() - self.observer = None - self.logfile.close() - self.logfile = None - else: - self.observer.stop() - self.observer = None - # Don't close stdout. 
- self.logfile = None - - -class OOPSService(Service): - - name = "oops" - - def __init__(self, log_service, oops_dir, oops_reporter): - self.config = None - self.log_service = log_service - self.oops_dir = oops_dir - self.oops_reporter = oops_reporter - - def startService(self): - Service.startService(self) - self.config = oops_config() - # Add the oops publisher that writes files in the configured place if - # the command line option was set. - if self.oops_dir: - repo = DateDirRepo(self.oops_dir) - self.config.publishers.append( - defer_publisher(oops.publish_new_only(repo.publish))) - if self.oops_reporter: - self.config.template['reporter'] = self.oops_reporter - self.observer = OOPSObserver( - self.config, self.log_service.observer.emit) - addObserver(self.observer.emit) - - def stopService(self): - Service.stopService(self) - removeObserver(self.observer.emit) - self.observer = None - self.config = None diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/specs/amqp0-8.xml maas-1.7.6+bzr3376/src/provisioningserver/specs/amqp0-8.xml --- maas-1.5.4+bzr2294/src/provisioningserver/specs/amqp0-8.xml 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/specs/amqp0-8.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,771 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/start_cluster_controller.py maas-1.7.6+bzr3376/src/provisioningserver/start_cluster_controller.py --- maas-1.5.4+bzr2294/src/provisioningserver/start_cluster_controller.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/start_cluster_controller.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,185 +0,0 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Command: start the cluster controller.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - 'add_arguments', - 'run', - ] - -from grp import getgrnam -import httplib -import json -from logging import getLogger -import os -from pwd import getpwnam -from time import sleep -from urllib2 import ( - HTTPError, - URLError, - ) - -from apiclient.maas_client import ( - MAASClient, - MAASDispatcher, - NoAuth, - ) -from provisioningserver.cluster_config import get_cluster_uuid -from provisioningserver.network import discover_networks - - -logger = getLogger(__name__) - - -class ClusterControllerRejected(Exception): - """Request to become a cluster controller has been rejected.""" - - -def add_arguments(parser): - """For use by :class:`MainScript`.""" - parser.add_argument( - 'server_url', metavar='URL', help="URL to the MAAS region controller.") - parser.add_argument( - '--user', '-u', metavar='USER', default='maas', - help="System user identity that should run the cluster controller.") - parser.add_argument( - '--group', '-g', metavar='GROUP', default='maas', - help="System group that should run the cluster controller.") - - -def log_error(exception): - logger.info( - "Could not register with region controller: %s." - % exception.reason) - - -def make_anonymous_api_client(server_url): - """Create an unauthenticated API client.""" - return MAASClient(NoAuth(), MAASDispatcher(), server_url) - - -def register(server_url): - """Request Rabbit connection details from the domain controller. - - Offers this machine to the region controller as a potential cluster - controller. - - :param server_url: URL to the region controller's MAAS API. - :return: A dict of connection details if this cluster controller has been - accepted, or `None` if there is no definite response yet. If there - is no definite response, retry this call later. 
- :raise ClusterControllerRejected: if this system has been rejected as a - cluster controller. - """ - known_responses = {httplib.OK, httplib.FORBIDDEN, httplib.ACCEPTED} - - interfaces = json.dumps(discover_networks()) - client = make_anonymous_api_client(server_url) - cluster_uuid = get_cluster_uuid() - try: - response = client.post( - 'api/1.0/nodegroups/', 'register', - interfaces=interfaces, uuid=cluster_uuid) - except HTTPError as e: - status_code = e.code - if e.code not in known_responses: - log_error(e) - # Unknown error. Keep trying. - return None - except URLError as e: - log_error(e) - # Unknown error. Keep trying. - return None - else: - status_code = response.getcode() - - if status_code == httplib.OK: - # Our application has been approved. Proceed. - return json.loads(response.read()) - elif status_code == httplib.ACCEPTED: - # Our application is still waiting for approval. Keep trying. - return None - elif status_code == httplib.FORBIDDEN: - # Our application has been rejected. Give up. - raise ClusterControllerRejected( - "This system has been rejected as a cluster controller.") - else: - raise AssertionError("Unexpected return code: %r" % status_code) - - -def start_celery(server_url, connection_details, user, group): - broker_url = connection_details['BROKER_URL'] - uid = getpwnam(user).pw_uid - gid = getgrnam(group).gr_gid - - # Copy environment, but also tell celeryd what broker to listen to - # and the URL for the region controller. - env = dict( - os.environ, CELERY_BROKER_URL=broker_url, MAAS_URL=server_url) - command = 'celeryd', '--beat', '--queues', get_cluster_uuid() - - # Change gid first, just in case changing the uid might deprive - # us of the privileges required to setgid. 
- os.setgid(gid) - os.setuid(uid) - - os.execvpe(command[0], command, env=env) - - -def request_refresh(server_url): - client = make_anonymous_api_client(server_url) - try: - client.post('api/1.0/nodegroups/', 'refresh_workers') - except URLError as e: - logger.warn( - "Could not request secrets from region controller: %s" - % e.reason) - - -def start_up(server_url, connection_details, user, group): - """We've been accepted as a cluster controller; start doing the job. - - This starts up celeryd, listening to the broker that the region - controller pointed us to, and on the appropriate queue. - """ - # Get the region controller to send out credentials. If it arrives - # before celeryd has started up, we should find the message waiting - # in our queue. Even if we're new and the queue did not exist yet, - # the arriving task will create the queue. - request_refresh(server_url) - start_celery(server_url, connection_details, user=user, group=group) - - -def set_up_logging(): - """Set up logging.""" - # This import has side effects (it imports celeryconfig) and may - # produce warnings (if there is no celeryconfig). - # Postpone the import so that we don't go through that every time - # anything imports this module. - from celery.app import app_or_default - app_or_default().log.setup_logging_subsystem() - - -def run(args): - """Start the cluster controller. - - If this system is still awaiting approval as a cluster controller, this - command will keep looping until it gets a definite answer. 
- """ - set_up_logging() - connection_details = register(args.server_url) - while connection_details is None: - sleep(60) - connection_details = register(args.server_url) - start_up( - args.server_url, connection_details, user=args.user, group=args.group) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tags.py maas-1.7.6+bzr3376/src/provisioningserver/tags.py --- maas-1.5.4+bzr2294/src/provisioningserver/tags.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tags.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,9 +1,7 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). -"""Celery jobs for managing tags. - -""" +"""Cluster-side evaluation of tags.""" from __future__ import ( absolute_import, @@ -17,7 +15,6 @@ __all__ = [ 'merge_details', 'merge_details_cleanly', - 'MissingCredentials', 'process_node_tags', ] @@ -25,33 +22,17 @@ from collections import OrderedDict from functools import partial import httplib -from logging import getLogger import urllib2 -from apiclient.maas_client import ( - MAASClient, - MAASDispatcher, - MAASOAuth, - ) import bson from lxml import etree -from provisioningserver.auth import ( - get_recorded_api_credentials, - get_recorded_nodegroup_uuid, - ) -from provisioningserver.cluster_config import get_maas_url -from provisioningserver.utils import ( - classify, - try_match_xpath, - ) +from provisioningserver.logger import get_maas_logger +from provisioningserver.utils import classify +from provisioningserver.utils.xpath import try_match_xpath import simplejson as json -logger = getLogger(__name__) - - -class MissingCredentials(Exception): - """The MAAS URL or credentials are not yet set.""" +maaslog = get_maas_logger("tag_processing") # An example laptop's lshw XML dump was 135kB. 
An example lab's LLDP @@ -62,25 +43,6 @@ DEFAULT_BATCH_SIZE = 100 -def get_cached_knowledge(): - """Get all the information that we need to know, or raise an error. - - :return: (client, nodegroup_uuid) - """ - api_credentials = get_recorded_api_credentials() - if api_credentials is None: - logger.error("Not updating tags: don't have API key yet.") - return None, None - nodegroup_uuid = get_recorded_nodegroup_uuid() - if nodegroup_uuid is None: - logger.error("Not updating tags: don't have UUID yet.") - return None, None - client = MAASClient( - MAASOAuth(*api_credentials), MAASDispatcher(), - get_maas_url()) - return client, nodegroup_uuid - - # A content-type: function mapping that can decode data of that type. decoders = { "application/json": lambda data: json.loads(data), @@ -150,7 +112,7 @@ :param removed: Set of nodes to remove """ path = '/api/1.0/tags/%s/' % (tag_name,) - logger.debug( + maaslog.debug( "Updating nodes for %s %s, adding %s removing %s" % (tag_name, uuid, len(added), len(removed))) try: @@ -163,7 +125,7 @@ msg = e.fp.read() else: msg = e.msg - logger.info("Got a CONFLICT while updating tag: %s", msg) + maaslog.info("Got a CONFLICT while updating tag: %s", msg) return {} raise @@ -198,7 +160,7 @@ try: lshw = etree.fromstring(xmldata) except etree.XMLSyntaxError as e: - logger.warn("Invalid lshw details: %s", e) + maaslog.warn("Invalid lshw details: %s", e) del details["lshw"] # Don't process again later. else: # We're throwing away the existing root, but we can adopt @@ -218,7 +180,7 @@ try: detail = etree.fromstring(xmldata) except etree.XMLSyntaxError as e: - logger.warn("Invalid %s details: %s", namespace, e) + maaslog.warn("Invalid %s details: %s", namespace, e) else: # Add the namespace to all unqualified elements. 
for elem in detail.iter("{}*"): @@ -327,7 +289,7 @@ def process_all(client, tag_name, tag_definition, nodegroup_uuid, system_ids, xpath, batch_size=None): - logger.debug( + maaslog.debug( "processing %d system_ids for tag %s nodegroup %s", len(system_ids), tag_name, nodegroup_uuid) @@ -337,7 +299,7 @@ batches = gen_batches(system_ids, batch_size) node_details = gen_node_details(client, nodegroup_uuid, batches) nodes_matched, nodes_unmatched = classify( - partial(try_match_xpath, xpath, logger=logger), node_details) + partial(try_match_xpath, xpath, logger=maaslog), node_details) # Upload all updates for one nodegroup at one time. This should be no more # than ~41*10,000 = 410kB. That should take <1s even on a 10Mbit network. @@ -348,20 +310,18 @@ nodes_matched, nodes_unmatched) -def process_node_tags(tag_name, tag_definition, tag_nsmap, batch_size=None): +def process_node_tags( + tag_name, tag_definition, tag_nsmap, + client, nodegroup_uuid, batch_size=None): """Update the nodes for a new/changed tag definition. + :param client: A `MAASClient` used to fetch the node's details via + calls to the web API. + :param nodegroup_uuid: The UUID for this cluster. :param tag_name: Name of the tag to update nodes for :param tag_definition: Tag definition :param batch_size: Size of batch """ - client, nodegroup_uuid = get_cached_knowledge() - if not all([client, nodegroup_uuid]): - logger.error( - "Unable to update tag: %s for definition %r. " - "Please refresh secrets, then rebuild this tag." 
- % (tag_name, tag_definition)) - raise MissingCredentials() # We evaluate this early, so we can fail before sending a bunch of data to # the server xpath = etree.XPath(tag_definition, namespaces=tag_nsmap) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tasks.py maas-1.7.6+bzr3376/src/provisioningserver/tasks.py --- maas-1.5.4+bzr2294/src/provisioningserver/tasks.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tasks.py 2015-07-10 01:27:14.000000000 +0000 @@ -13,73 +13,29 @@ __metaclass__ = type __all__ = [ - 'power_off', - 'power_on', - 'refresh_secrets', - 'report_boot_images', 'rndc_command', - 'setup_rndc_configuration', - 'restart_dhcp_server', - 'stop_dhcp_server', - 'write_dhcp_config', 'write_dns_config', 'write_dns_zone_config', 'write_full_dns_config', ] from functools import wraps -from logging import getLogger -import os from subprocess import CalledProcessError from celery.app import app_or_default from celery.task import task -from provisioningserver import ( - boot_images, - tags, - ) -from provisioningserver.auth import ( - record_api_credentials, - record_nodegroup_uuid, - ) -from provisioningserver.custom_hardware.seamicro import ( - probe_seamicro15k_and_enlist, - ) -from provisioningserver.custom_hardware.ucsm import probe_and_enlist_ucsm -from provisioningserver.custom_hardware.virsh import probe_virsh_and_enlist -from provisioningserver.dhcp import ( - config, - detect, - ) -from provisioningserver.dhcp.leases import upload_leases from provisioningserver.dns.config import ( DNSConfig, execute_rndc_command, set_up_options_conf, - setup_rndc, - ) -from provisioningserver.drivers.hardware.mscm import probe_and_enlist_mscm -from provisioningserver.omshell import Omshell -from provisioningserver.power.poweraction import ( - PowerAction, - PowerActionFail, ) -from provisioningserver.utils import ( - call_and_check, - find_ip_via_arp, - sudo_write_file, - ) - -# For each item passed to refresh_secrets, a 
refresh function to give it to. -refresh_functions = { - 'api_credentials': record_api_credentials, - 'nodegroup_uuid': record_nodegroup_uuid, -} +from provisioningserver.logger import get_maas_logger +from provisioningserver.logger.utils import log_call celery_config = app_or_default().conf -logger = getLogger(__name__) +maaslog = get_maas_logger("tasks") # The tasks catch bare exceptions in an attempt to circumvent Celery's @@ -87,7 +43,7 @@ # error message contained in the exception itself! The message is # printed and then the exception re-raised so that it marks the task as # failed - in doing so it logs the stack trace, which is why the code -# does not do a simple logger.exception(exc). +# does not do a simple maaslog.exception(exc). def log_exception_text(func): """Wrap a function and log any exception text raised.""" @wraps(func) @@ -95,100 +51,11 @@ try: func(*args, **kwargs) except Exception as e: - logger.error("%s: %s", func.__name__, unicode(e)) + maaslog.error("%s: %s", func.__name__, unicode(e)) raise return wrapper -@task -@log_exception_text -def refresh_secrets(**kwargs): - """Update the worker's knowledge of various secrets it needs. - - The worker shares some secrets with the MAAS server, such as its - omapi key for talking to the DHCP server, and its MAAS API credentials. - When the server sends tasks to the worker, the tasks will include these - secrets as needed. But not everything the worker does is initiated by - a server request, so it needs copies of these secrets at hand. - - We don't store these secrets in the worker, but we hold copies in - memory. The worker won't perform jobs that require secrets it does - not have yet, waiting instead for the next chance to catch up. - - To make sure that the worker does not have to wait too long, the server - can send periodic `refresh_secrets` messages with the required - information. - - Tasks can also call `refresh_secrets` to record information they receive - from the server. 
- - All refreshed items are passed as keyword arguments, to avoid confusion - and allow for easy reordering. All refreshed items are optional. An - item that is not passed will not be refreshed, so it's entirely valid to - call this for just a single item. However `None` is a value like any - other, so passing `foo=None` will cause item `foo` to be refreshed with - value `None`. - - To help catch simple programming mistakes, passing an unknown argument - will result in an assertion failure. - - :param api_credentials: A colon separated string containing this - worker's credentials for accessing the MAAS API: consumer key, - resource token, resource secret. - :param nodegroup_uuid: The uuid of the node group that this worker - manages. - """ - for key, value in kwargs.items(): - assert key in refresh_functions, "Unknown refresh item: %s" % key - refresh_functions[key](value) - - -# ===================================================================== -# Power-related tasks -# ===================================================================== - - -def issue_power_action(power_type, power_change, **kwargs): - """Issue a power action to a node. - - :param power_type: The node's power type. Must have a corresponding - power template. - :param power_change: The change to request: 'on' or 'off'. - :param **kwargs: Keyword arguments are passed on to :class:`PowerAction`. - """ - assert power_change in ('on', 'off'), ( - "Unknown power change keyword: %s" % power_change) - kwargs['power_change'] = power_change - if 'mac_address' in kwargs: - kwargs['ip_address'] = find_ip_via_arp(kwargs['mac_address']) - kwargs.setdefault('ip_address', None) - try: - pa = PowerAction(power_type) - pa.execute(**kwargs) - except PowerActionFail: - # TODO: signal to webapp that it failed - - # Re-raise, so the job is marked as failed. Only currently - # useful for tests. - raise - - # TODO: signal to webapp that it worked. 
- - -@task -@log_exception_text -def power_on(power_type, **kwargs): - """Turn a node on.""" - issue_power_action(power_type, 'on', **kwargs) - - -@task -@log_exception_text -def power_off(power_type, **kwargs): - """Turn a node off.""" - issue_power_action(power_type, 'off', **kwargs) - - # ===================================================================== # DNS-related tasks # ===================================================================== @@ -200,7 +67,8 @@ RNDC_COMMAND_RETRY_DELAY = 2 -@task(max_retries=RNDC_COMMAND_MAX_RETRY, queue=celery_config.WORKER_QUEUE_DNS) +@task(max_retries=RNDC_COMMAND_MAX_RETRY) +@log_call() @log_exception_text def rndc_command(arguments, retry=False, callback=None): """Use rndc to execute a command. @@ -218,13 +86,14 @@ return rndc_command.retry( exc=exc, countdown=RNDC_COMMAND_RETRY_DELAY) else: - logger.error("rndc_command failed: %s", unicode(exc)) + maaslog.error("rndc_command failed: %s", unicode(exc)) raise if callback is not None: callback.delay() -@task(queue=celery_config.WORKER_QUEUE_DNS) +@task +@log_call() @log_exception_text def write_full_dns_config(zones=None, callback=None, **kwargs): """Write out the DNS configuration files: the main configuration @@ -247,7 +116,8 @@ callback.delay() -@task(queue=celery_config.WORKER_QUEUE_DNS) +@task +@log_call() @log_exception_text def write_dns_config(zones=(), callback=None, **kwargs): """Write out the DNS configuration file. @@ -265,7 +135,8 @@ callback.delay() -@task(queue=celery_config.WORKER_QUEUE_DNS) +@task +@log_call() @log_exception_text def write_dns_zone_config(zones, callback=None, **kwargs): """Write out DNS zones. @@ -280,226 +151,3 @@ zone.write_config() if callback is not None: callback.delay() - - -@task(queue=celery_config.WORKER_QUEUE_DNS) -@log_exception_text -def setup_rndc_configuration(callback=None): - """Write out the two rndc configuration files (rndc.conf and - named.conf.rndc). - - :param callback: Callback subtask. 
- :type callback: callable - """ - setup_rndc() - if callback is not None: - callback.delay() - - -# ===================================================================== -# DHCP-related tasks -# ===================================================================== - - -@task -@log_exception_text -def upload_dhcp_leases(): - """Upload DHCP leases. - - Uploads leases to the MAAS API, using cached credentials -- the task - originates with celerybeat, not with a server request. - """ - upload_leases() - - -@task -@log_exception_text -def add_new_dhcp_host_map(mappings, server_address, shared_key): - """Add address mappings to the DHCP server. - - Do not invoke this when DHCP is set to be managed manually. - - :param mappings: A dict of new IP addresses, and the MAC addresses they - translate to. - :param server_address: IP or hostname for the DHCP server - :param shared_key: The HMAC-MD5 key that the DHCP server uses for access - control. - """ - omshell = Omshell(server_address, shared_key) - try: - for ip_address, mac_address in mappings.items(): - omshell.create(ip_address, mac_address) - except CalledProcessError as e: - # TODO signal to webapp that the job failed. - - # Re-raise, so the job is marked as failed. Only currently - # useful for tests. - logger.error("add_new_dhcp_host_map failed: %s", unicode(e)) - raise - - -@task -@log_exception_text -def remove_dhcp_host_map(ip_address, server_address, omapi_key): - """Remove an IP to MAC mapping in the DHCP server. - - Do not invoke this when DHCP is set to be managed manually. - - :param ip_address: Dotted quad string - :param server_address: IP or hostname for the DHCP server - :param omapi_key: The HMAC-MD5 key that the DHCP server uses for access - control. - """ - omshell = Omshell(server_address, omapi_key) - try: - omshell.remove(ip_address) - except CalledProcessError as e: - # TODO signal to webapp that the job failed. - - # Re-raise, so the job is marked as failed. Only currently - # useful for tests. 
- logger.error("remove_dhcp_host_map failed: %s", unicode(e)) - raise - - -@task -@log_exception_text -def write_dhcp_config(callback=None, **kwargs): - """Write out the DHCP configuration file and restart the DHCP server. - - :param dhcp_interfaces: Space-separated list of interfaces that the - DHCP server should listen on. - :param **kwargs: Keyword args passed to dhcp.config.get_config() - """ - sudo_write_file( - celery_config.DHCP_CONFIG_FILE, config.get_config(**kwargs)) - sudo_write_file( - celery_config.DHCP_INTERFACES_FILE, kwargs.get('dhcp_interfaces', '')) - if callback is not None: - callback.delay() - - -@task -@log_exception_text -def restart_dhcp_server(): - """Restart the DHCP server.""" - call_and_check(['sudo', '-n', 'service', 'maas-dhcp-server', 'restart']) - - -# Message to put in the DHCP config file when the DHCP server gets stopped. -DISABLED_DHCP_SERVER = "# DHCP server stopped." - - -@task -@log_exception_text -def stop_dhcp_server(): - """Write a blank config file and stop a DHCP server.""" - # Write an empty config file to avoid having an outdated config laying - # around. - sudo_write_file( - celery_config.DHCP_CONFIG_FILE, DISABLED_DHCP_SERVER) - call_and_check(['sudo', '-n', 'service', 'maas-dhcp-server', 'stop']) - - -@task -@log_exception_text -def periodic_probe_dhcp(): - """Probe for foreign DHCP servers.""" - detect.periodic_probe_task() - - -# ===================================================================== -# Boot images-related tasks -# ===================================================================== - - -@task -@log_exception_text -def report_boot_images(): - """For master worker only: report available netboot images.""" - boot_images.report_to_server() - - -# How many times should a update node tags task be retried? -UPDATE_NODE_TAGS_MAX_RETRY = 10 - -# How long to wait between update node tags task retries (in seconds)? 
-UPDATE_NODE_TAGS_RETRY_DELAY = 2 - - -# ===================================================================== -# Tags-related tasks -# ===================================================================== - - -@task(max_retries=UPDATE_NODE_TAGS_MAX_RETRY) -@log_exception_text -def update_node_tags(tag_name, tag_definition, tag_nsmap, retry=True): - """Update the nodes for a new/changed tag definition. - - :param tag_name: Name of the tag to update nodes for - :param tag_definition: Tag definition - :param retry: Whether to retry on failure - """ - try: - tags.process_node_tags(tag_name, tag_definition, tag_nsmap) - except tags.MissingCredentials, exc: - if retry: - return update_node_tags.retry( - exc=exc, countdown=UPDATE_NODE_TAGS_RETRY_DELAY) - else: - raise - - -# ===================================================================== -# Image importing-related tasks -# ===================================================================== - -@task -@log_exception_text -def import_boot_images(http_proxy=None, callback=None): - env = dict(os.environ) - if http_proxy is not None: - env['http_proxy'] = http_proxy - env['https_proxy'] = http_proxy - call_and_check(['sudo', '-n', '-E', 'maas-import-pxe-files'], env=env) - if callback is not None: - callback.delay() - - -# ===================================================================== -# Custom hardware tasks -# ===================================================================== - -@task -@log_exception_text -def add_seamicro15k(mac, username, password, power_control=None): - """ See `maasserver.api.NodeGroup.add_seamicro15k`. """ - ip = find_ip_via_arp(mac) - if ip is not None: - probe_seamicro15k_and_enlist( - ip, username, password, - power_control=power_control) - else: - logger.warning("Couldn't find IP address for MAC %s" % mac) - - -@task -@log_exception_text -def add_virsh(poweraddr, password=None): - """ See `maasserver.api.NodeGroup.add_virsh`. 
""" - probe_virsh_and_enlist(poweraddr, password=password) - - -@task -@log_exception_text -def enlist_nodes_from_ucsm(url, username, password): - """ See `maasserver.api.NodeGroupHandler.enlist_nodes_from_ucsm`. """ - probe_and_enlist_ucsm(url, username, password) - - -@task -@log_exception_text -def enlist_nodes_from_mscm(host, username, password): - """ See `maasserver.api.NodeGroupHandler.enlist_nodes_from_mscm`. """ - probe_and_enlist_mscm(host, username, password) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/testing/amqpclient.py maas-1.7.6+bzr3376/src/provisioningserver/testing/amqpclient.py --- maas-1.5.4+bzr2294/src/provisioningserver/testing/amqpclient.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/testing/amqpclient.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,164 +0,0 @@ -# Copyright 2005-2012 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for `provisioningserver.amqpclient`.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from unittest import skip - -from provisioningserver.amqpclient import AMQFactory -from rabbitfixture.server import RabbitServer -from testtools import TestCase -from testtools.deferredruntest import ( - AsynchronousDeferredRunTestForBrokenTwisted, - ) -from twisted.internet import reactor -from twisted.internet.defer import ( - Deferred, - DeferredQueue, - inlineCallbacks, - ) -from txamqp.client import Closed - - -class QueueWrapper(object): - """ - Wrap a queue to have notifications when get is called on this particular - queue. 
- """ - - def __init__(self, queue): - self._real_queue_get = queue.get - self.event_queue = DeferredQueue() - queue.get = self.get - - def get(self, timeout=None): - self.event_queue.put(None) - return self._real_queue_get(timeout) - - -class AMQTest(TestCase): - - run_tests_with = AsynchronousDeferredRunTestForBrokenTwisted.make_factory( - timeout=5) - - VHOST = "/" - USER = "guest" - PASSWORD = "guest" - - @skip( - "RabbitMQ is not yet a required component " - "of a running MAAS pserv instance.") - def setUp(self): - """ - At each run, we delete the test vhost and recreate it, to be sure to be - in a clean environment. - """ - super(AMQTest, self).setUp() - - self.rabbit = RabbitServer() - self.useFixture(self.rabbit) - - self.queues = set() - self.exchanges = set() - self.connected_deferred = Deferred() - - self.factory = AMQFactory( - self.USER, self.PASSWORD, self.VHOST, - self.amq_connected, self.amq_disconnected, self.amq_failed) - self.factory.initialDelay = 2.0 - self.connector = reactor.connectTCP( - self.rabbit.config.hostname, self.rabbit.config.port, - self.factory) - return self.connected_deferred - - @inlineCallbacks - def tearDown(self): - # XXX: Moving this up here to silence a nigh-on inexplicable error - # that occurs when it's at the bottom of the function. - self.factory.stopTrying() - self.connector.disconnect() - super(AMQTest, self).tearDown() - - # XXX: This is only safe because we tear down the whole server. - # We can't run this after the tearDown above, because the - # fixture is gone. 
- return - - self.connected_deferred = Deferred() - factory = AMQFactory( - self.USER, self.PASSWORD, self.VHOST, - self.amq_connected, self.amq_disconnected, self.amq_failed) - connector = reactor.connectTCP( - self.rabbit.config.hostname, self.rabbit.config.port, factory) - yield self.connected_deferred - channel_id = 1 - for queue in self.queues: - try: - yield self.channel.queue_delete(queue=queue) - except Closed: - channel_id += 1 - self.channel = yield self.client.channel(channel_id) - yield self.channel.channel_open() - for exchange in self.exchanges: - try: - yield self.channel.exchange_delete(exchange=exchange) - except Closed: - channel_id += 1 - self.channel = yield self.client.channel(channel_id) - yield self.channel.channel_open() - factory.stopTrying() - connector.disconnect() - - def amq_connected(self, client_and_channel): - """ - Save the channel and client, and fire C{self.connected_deferred}. - - This is the connected_callback that's pased to the L{AMQFactory}. - """ - client, channel = client_and_channel - self.real_queue_declare = channel.queue_declare - channel.queue_declare = self.queue_declare - self.real_exchange_declare = channel.exchange_declare - channel.exchange_declare = self.exchange_declare - self.channel = channel - self.client = client - self.connected_deferred.callback(None) - - def amq_disconnected(self): - """ - This is the disconnected_callback that's passed to the L{AMQFactory}. - """ - - def amq_failed(self, connector_and_reason): - """ - This is the failed_callback that's passed to the L{AMQFactory}. - """ - connector, reason = connector_and_reason - self.connected_deferred.errback(reason) - - def queue_declare(self, queue, **kwargs): - """ - Keep track of the queue declaration, and forward to the real - queue_declare function. 
- """ - self.queues.add(queue) - return self.real_queue_declare(queue=queue, **kwargs) - - def exchange_declare(self, exchange, **kwargs): - """ - Keep track of the exchange declaration, and forward to the real - exchange_declare function. - """ - self.exchanges.add(exchange) - return self.real_exchange_declare(exchange=exchange, **kwargs) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/testing/bindfixture.py maas-1.7.6+bzr3376/src/provisioningserver/testing/bindfixture.py --- maas-1.5.4+bzr2294/src/provisioningserver/testing/bindfixture.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/testing/bindfixture.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012-2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Server fixture for BIND.""" @@ -26,7 +26,7 @@ import fixtures from maastesting.fixtures import TempDirectory from provisioningserver.dns.config import generate_rndc -from provisioningserver.utils import ( +from provisioningserver.utils.fs import ( atomic_write, ensure_dir, ) @@ -88,6 +88,7 @@ options { directory "{{homedir}}"; listen-on port {{port}} {127.0.0.1;}; + listen-on-v6 port {{port}} {::1;}; pid-file "{{homedir}}/named.pid"; session-keyfile "{{homedir}}/session.key"; {{if include_in_options}} diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/testing/boot_images.py maas-1.7.6+bzr3376/src/provisioningserver/testing/boot_images.py --- maas-1.5.4+bzr2294/src/provisioningserver/testing/boot_images.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/testing/boot_images.py 2015-07-10 01:27:14.000000000 +0000 @@ -23,15 +23,17 @@ """Create an arbitrary dict of boot-image parameters. 
These are the parameters that together describe a kind of boot for - which we may need a kernel and initrd: architecture, + which we may need a kernel and initrd: operating system, architecture, sub-architecture, Ubuntu release, boot purpose, and release label. """ return dict( + osystem=factory.make_name('osystem'), architecture=factory.make_name('architecture'), subarchitecture=factory.make_name('subarchitecture'), release=factory.make_name('release'), label=factory.make_name('label'), purpose=factory.make_name('purpose'), + supported_subarches=factory.make_name("sup_subarches"), ) @@ -39,11 +41,36 @@ """Create a dict of boot-image parameters as used to store the image. These are the parameters that together describe a path to store a boot - image: architecture, sub-architecture, Ubuntu release, and release label. + image: operating system, architecture, sub-architecture, Ubuntu release, + and release label. """ return dict( + osystem=factory.make_name('osystem'), architecture=factory.make_name('architecture'), subarchitecture=factory.make_name('subarchitecture'), release=factory.make_name('release'), label=factory.make_name('label'), ) + + +def make_image(params, purpose, metadata=None, xinstall_path=None, + xinstall_type=None): + """Describe an image as a dict similar to what `list_boot_images` returns. + + The `params` are as returned from `make_boot_image_storage_params`. 
+ """ + image = params.copy() + image['purpose'] = purpose + if metadata is not None: + image.update(metadata) + if purpose == 'xinstall': + if xinstall_path is None: + xinstall_path = 'root-tgz' + if xinstall_type is None: + xinstall_type = 'tgz' + image['xinstall_path'] = xinstall_path + image['xinstall_type'] = xinstall_type + else: + image['xinstall_path'] = '' + image['xinstall_type'] = '' + return image diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/testing/config.py maas-1.7.6+bzr3376/src/provisioningserver/testing/config.py --- maas-1.5.4+bzr2294/src/provisioningserver/testing/config.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/testing/config.py 2015-07-10 01:27:14.000000000 +0000 @@ -13,7 +13,7 @@ __metaclass__ = type __all__ = [ - "BootConfigFixture", + "BootSourcesFixture", "ConfigFixture", "ConfigFixtureBase", "set_tftp_root", @@ -26,10 +26,7 @@ Fixture, ) from maastesting.fixtures import TempDirectory -from provisioningserver.config import ( - BootConfig, - Config, - ) +from provisioningserver.config import Config import yaml @@ -67,15 +64,31 @@ class ConfigFixture(ConfigFixtureBase): - """Fixture to help with testing :class:`Config`.""" + """Fixture to substitute for :class:`Config` in tests.""" schema = Config -class BootConfigFixture(ConfigFixtureBase): - """Fixture to help with testing :class:`BootConfig`.""" +class BootSourcesFixture(Fixture): + """Fixture to substitute for :class:`BootSources` in tests. + + :ivar sources: A list of dicts defining boot sources. + :ivar name: Base name for the file that will hold the YAML + representation of `sources`. It will be in a temporary directory. + :ivar filename: Full path to the YAML file. 
+ """ + + def __init__(self, sources, name='sources.yaml'): + super(BootSourcesFixture, self).__init__() + self.sources = sources + self.name = name - schema = BootConfig + def setUp(self): + super(BootSourcesFixture, self).setUp() + self.dir = self.useFixture(TempDirectory()).path + self.filename = path.join(self.dir, self.name) + with open(self.filename, 'wb') as stream: + yaml.safe_dump(self.sources, stream=stream) def set_tftp_root(tftproot): diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/testing/os.py maas-1.7.6+bzr3376/src/provisioningserver/testing/os.py --- maas-1.5.4+bzr2294/src/provisioningserver/testing/os.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/testing/os.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,79 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). +"""Utilities for testing operating systems-related code.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'FakeOS', + ] + + +from maastesting.factory import factory +from provisioningserver.drivers.osystem import ( + OperatingSystem, + OperatingSystemRegistry, + ) + + +class FakeOS(OperatingSystem): + + name = "" + title = "" + + def __init__(self, name, purpose, releases=None): + self.name = name + self.title = name + self.purpose = purpose + if releases is None: + self.fake_list = [ + factory.make_string() + for _ in range(3) + ] + else: + self.fake_list = releases + + def get_boot_image_purposes(self, *args): + return self.purpose + + def is_release_supported(self, release): + return release in self.fake_list + + def get_supported_releases(self): + return self.fake_list + + def get_default_release(self): + return self.fake_list[0] + + def get_release_title(self, release): + return release + + +def make_osystem(testcase, osystem, purpose): + """Makes the operating 
system class and registers it.""" + if osystem not in OperatingSystemRegistry: + fake = FakeOS(osystem, purpose) + OperatingSystemRegistry.register_item(fake.name, fake) + testcase.addCleanup( + OperatingSystemRegistry.unregister_item, osystem) + return fake + + else: + + obj = OperatingSystemRegistry[osystem] + old_func = obj.get_boot_image_purposes + testcase.patch(obj, 'get_boot_image_purposes').return_value = purpose + + def reset_func(obj, old_func): + obj.get_boot_image_purposes = old_func + + testcase.addCleanup(reset_func, obj, old_func) + + return obj diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/testing/tags.py maas-1.7.6+bzr3376/src/provisioningserver/testing/tags.py --- maas-1.5.4+bzr2294/src/provisioningserver/testing/tags.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/testing/tags.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - - -from apiclient.testing.django_client_proxy import MAASDjangoTestClient -from fixtures import Fixture -from maasserver.models import NodeGroup -from maasserver.testing.oauthclient import OAuthAuthenticatedClient -from maasserver.utils.orm import get_one -from maasserver.worker_user import get_worker_user -from provisioningserver import tags -from testtools.monkey import patch - - -def get_nodegroup_cached_knowledge(): - """Get a MAASClient and nodegroup_uuid. - - We make use of the fact that populate_tags refreshes the secrets before it - starts doing work. So effectively the single real-world worker changes - workers on each iteration of the loop. - - The MAASDjangoTestClient that is returned proxies to the Django testing - Client, so we don't actually have to make HTTP calls. 
- """ - nodegroup_uuid = tags.get_recorded_nodegroup_uuid() - maas_client = get_nodegroup_worker_client(nodegroup_uuid) - return maas_client, nodegroup_uuid - - -def get_nodegroup_worker_client(nodegroup_uuid): - """Get a MAASClient that can do work for this nodegroup.""" - nodegroup = get_one(NodeGroup.objects.filter(uuid=nodegroup_uuid)) - django_client = OAuthAuthenticatedClient( - get_worker_user(), token=nodegroup.api_token) - maas_client = MAASDjangoTestClient(django_client) - return maas_client - - -class TagCachedKnowledgeFixture(Fixture): - """Install the get_nodegroup_cached_knowledge for this test.""" - - def setUp(self): - super(TagCachedKnowledgeFixture, self).setUp() - restore = patch( - tags, "get_cached_knowledge", get_nodegroup_cached_knowledge) - self.addCleanup(restore) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/testing/testcase.py maas-1.7.6+bzr3376/src/provisioningserver/testing/testcase.py --- maas-1.5.4+bzr2294/src/provisioningserver/testing/testcase.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/testing/testcase.py 2015-07-10 01:27:14.000000000 +0000 @@ -16,23 +16,15 @@ 'PservTestCase', ] -from apiclient.testing.credentials import make_api_credentials from fixtures import EnvironmentVariableFixture from maastesting import testcase from maastesting.factory import factory -from provisioningserver.auth import ( - record_api_credentials, - record_nodegroup_uuid, - ) -from provisioningserver.testing.worker_cache import WorkerCacheFixture +from twisted.internet import reactor +from twisted.python import threadable class PservTestCase(testcase.MAASTestCase): - def setUp(self): - super(PservTestCase, self).setUp() - self.useFixture(WorkerCacheFixture()) - def make_maas_url(self): return 'http://127.0.0.1/%s' % factory.make_name('path') @@ -40,15 +32,18 @@ self.useFixture( EnvironmentVariableFixture("MAAS_URL", self.make_maas_url())) - def set_api_credentials(self): - 
record_api_credentials(':'.join(make_api_credentials())) + def register_as_io_thread(self): + """Make the current thread the IO thread. - def set_node_group_uuid(self): - nodegroup_uuid = factory.make_name('nodegroupuuid') - record_nodegroup_uuid(nodegroup_uuid) - - def set_secrets(self): - """Setup all secrets that we would get from refresh_secrets.""" - self.set_maas_url() - self.set_api_credentials() - self.set_node_group_uuid() + When pretending to be the reactor, by using clocks and suchlike, + register the current thread as the reactor thread, a.k.a. the IO + thread, to ensure correct operation from things like the `synchronous` + and `asynchronous` decorators. + + Do not use this when the reactor is running. + """ + self.assertFalse( + reactor.running, "Do not use this to change the IO thread " + "while the reactor is running.") + self.addCleanup(setattr, threadable, "ioThread", threadable.ioThread) + threadable.ioThread = threadable.getThreadID() diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/testing/tests/test_bindfixture.py maas-1.7.6+bzr3376/src/provisioningserver/testing/tests/test_bindfixture.py --- maas-1.5.4+bzr2294/src/provisioningserver/testing/tests/test_bindfixture.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/testing/tests/test_bindfixture.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the BIND fixture.""" @@ -133,7 +133,7 @@ def test_defaults_reallocated_after_teardown(self): seen_homedirs = set() resources = BINDServerResources() - for i in range(2): + for _ in range(2): with resources: self.assertTrue(os.path.exists(resources.homedir)) self.assertNotIn(resources.homedir, seen_homedirs) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/testing/worker_cache.py maas-1.7.6+bzr3376/src/provisioningserver/testing/worker_cache.py --- maas-1.5.4+bzr2294/src/provisioningserver/testing/worker_cache.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/testing/worker_cache.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Fixture to simulate the cache that worker processes normally share.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - 'WorkerCacheFixture', - ] - -from fixtures import Fixture -from provisioningserver import cache -from testtools.monkey import MonkeyPatcher - - -class WorkerCacheFixture(Fixture): - """Fake the cache that worker processes share.""" - - def setUp(self): - super(WorkerCacheFixture, self).setUp() - patcher = MonkeyPatcher( - (cache, 'cache', cache.Cache({})), - (cache, 'initialized', True)) - self.addCleanup(patcher.restore) - patcher.patch() diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_amqaclient.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_amqaclient.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_amqaclient.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_amqaclient.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,84 +0,0 @@ -# Copyright 2005-2012 Canonical Ltd. 
This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for C{AMQFactory}.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from provisioningserver.amqpclient import AMQFactory -from provisioningserver.testing.amqpclient import AMQTest -from testtools import TestCase -from testtools.deferredruntest import flush_logged_errors -from twisted.internet.defer import Deferred -from txamqp.protocol import AMQChannel -from txamqp.queue import Closed -from txamqp.spec import Spec - - -class AMQFactoryTest(TestCase): - - def test_init(self): - factory = AMQFactory("guest", "secret", "localhost", lambda x: None, - lambda: None, lambda x: None) - self.assertEquals(factory.user, "guest") - self.assertEquals(factory.password, "secret") - self.assertEquals(factory.vhost, "localhost") - self.assertTrue(isinstance(factory.spec, Spec)) - - -class AMQFactoryConnectedTest(AMQTest): - - def test_connected_callback(self): - self.assertTrue(isinstance(self.channel, AMQChannel)) - - def test_disconnected_callback(self): - d = Deferred() - - def disconnected(): - d.callback(None) - - self.factory.disconnected_callback = disconnected - self.connector.disconnect() - return d - - def test_reconnection(self): - d = Deferred() - - def connected((client, channel)): - self.assertTrue(isinstance(channel, AMQChannel)) - self.assertIsNot(channel, self.channel) - d.callback(None) - - self.factory.connected_callback = connected - self.factory.maxDelay = 0.01 - self.connector.disconnect() - return d - - -class AMQClosingTest(AMQTest): - """Tests the L{AMQFactory} when the connection is closing.""" - - count = 0 - - def amq_connected(self, (client, channel)): - super(AMQClosingTest, self).amq_connected((client, channel)) - if not self.count: - self.count += 1 - raise Closed() - - def test_catch_closed(self): - """ - This test ensures that 
L{Closed} exception raised by C{amq_connected} - is swallowed by L{AMQFactory}. - """ - errors = flush_logged_errors() - self.assertEquals(len(errors), 0) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_auth.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_auth.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_auth.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_auth.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for management of node-group workers' API credentials.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from apiclient.creds import convert_tuple_to_string -from apiclient.testing.credentials import make_api_credentials -from provisioningserver import ( - auth, - cache, - ) -from provisioningserver.testing.testcase import PservTestCase - - -class TestAuth(PservTestCase): - - def test_record_api_credentials_records_credentials_string(self): - creds_string = convert_tuple_to_string(make_api_credentials()) - auth.record_api_credentials(creds_string) - self.assertEqual( - creds_string, cache.cache.get(auth.API_CREDENTIALS_CACHE_KEY)) - - def test_get_recorded_api_credentials_returns_credentials_as_tuple(self): - creds = make_api_credentials() - auth.record_api_credentials(convert_tuple_to_string(creds)) - self.assertEqual(creds, auth.get_recorded_api_credentials()) - - def test_get_recorded_api_credentials_returns_None_without_creds(self): - self.assertIsNone(auth.get_recorded_api_credentials()) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_boot_images.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_boot_images.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_boot_images.py 2014-09-03 
14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_boot_images.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,61 +0,0 @@ -# Copyright 2012-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests for reporting of boot images.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import json - -from apiclient.maas_client import MAASClient -from mock import ( - Mock, - sentinel, - ) -from provisioningserver import boot_images -from provisioningserver.boot import tftppath -from provisioningserver.testing.boot_images import make_boot_image_params -from provisioningserver.testing.config import set_tftp_root -from provisioningserver.testing.testcase import PservTestCase - - -class TestBootImagesTasks(PservTestCase): - - def setUp(self): - super(TestBootImagesTasks, self).setUp() - self.useFixture(set_tftp_root(self.make_dir())) - - def test_sends_boot_images_to_server(self): - self.set_maas_url() - self.set_api_credentials() - image = make_boot_image_params() - self.patch(tftppath, 'list_boot_images', Mock(return_value=[image])) - get_cluster_uuid = self.patch(boot_images, "get_cluster_uuid") - get_cluster_uuid.return_value = sentinel.uuid - self.patch(MAASClient, 'post') - boot_images.report_to_server() - args, kwargs = MAASClient.post.call_args - self.assertEqual( - ( - 'api/1.0/nodegroups/%s/boot-images/' % sentinel.uuid, - 'report_boot_images', - ), - (kwargs["path"], kwargs["op"])) - self.assertItemsEqual([image], json.loads(kwargs['images'])) - - def test_does_nothing_without_credentials(self): - self.set_maas_url() - self.patch( - tftppath, 'list_boot_images', - Mock(return_value=make_boot_image_params())) - boot_images.report_to_server() - self.assertItemsEqual([], tftppath.list_boot_images.call_args_list) diff -Nru 
maas-1.5.4+bzr2294/src/provisioningserver/tests/test_cache.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_cache.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_cache.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_cache.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Tests cache.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from multiprocessing.managers import DictProxy - -from maastesting.factory import factory -from provisioningserver import cache -from provisioningserver.testing.testcase import PservTestCase - - -class TestCache(PservTestCase): - - def test_initialize_initializes_backend(self): - self.patch(cache, 'initialized', False) - cache.initialize() - self.addCleanup(cache._manager.shutdown) - self.assertIsInstance(cache.cache.cache_backend, DictProxy) - - def test_cache_stores_value(self): - key = factory.getRandomString() - value = factory.getRandomString() - cache.cache.set(key, value) - self.assertEqual(value, cache.cache.get(key)) - - def test_cache_clears_cache(self): - cache.cache.set(factory.getRandomString(), factory.getRandomString()) - cache.cache.clear() - self.assertEqual(0, len(cache.cache.cache_backend)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_config.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_config.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_config.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_config.py 2015-07-10 01:27:14.000000000 +0000 @@ -18,6 +18,7 @@ import errno from functools import partial from getpass import getuser +from io import BytesIO import os from textwrap import dedent @@ -28,7 +29,7 @@ from 
maastesting.factory import factory from maastesting.testcase import MAASTestCase from provisioningserver.config import ( - BootConfig, + BootSources, Config, ConfigBase, ConfigMeta, @@ -366,8 +367,6 @@ default_development_config = deepcopy(default_production_config) default_development_config.update(logfile="/dev/null") - default_development_config["oops"].update( - directory="logs/oops", reporter="maas-pserv") default_development_config["tftp"].update( port=5244, generator="http://localhost:5243/api/1.0/pxeconfig/") @@ -424,71 +423,61 @@ self.assertEqual(broker_password, config['broker']['password']) -class TestBootConfig(MAASTestCase): - """Tests for `provisioningserver.config.BootConfig`.""" +class TestBootSources(MAASTestCase): + """Tests for `provisioningserver.config.BootSources`.""" - default_production_config = { - 'boot': { - 'sources': [ - { - 'path': ( - 'http://maas.ubuntu.com/images/ephemeral-v2/releases/' - ), - 'keyring': ( - '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg'), - 'selections': [ - { - 'arches': ['*'], - 'release': '*', - 'subarches': ['*'], - 'labels': ['*'], - }, - ], - }, - ], - 'storage': '/var/lib/maas/boot-resources/', - 'configure_me': False, - }, - } + default_source = { + 'url': ( + 'http://maas.ubuntu.com/images/ephemeral-v2/releases/' + ), + 'keyring': ( + '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg'), + 'keyring_data': None, + 'selections': [ + { + 'os': '*', + 'release': '*', + 'labels': ['*'], + 'arches': ['*'], + 'subarches': ['*'], + }, + ], + } - default_development_config = { - 'boot': { - 'sources': [ - { - 'path': ( - 'http://maas.ubuntu.com/' - 'images/ephemeral-v2/releases/'), - 'keyring': ( - '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg'), - 'selections': [ - { - 'arches': ['i386', 'amd64'], - 'release': 'trusty', - 'subarches': ['generic'], - 'labels': ['release'], - }, - { - 'arches': ['i386', 'amd64'], - 'release': 'precise', - 'subarches': ['generic'], - 'labels': ['release'], - }, - ], - }, - 
], - 'storage': '/var/lib/maas/boot-resources/', - 'configure_me': True, - }, - } + def make_source(self): + """Create a dict defining an arbitrary `BootSource`.""" + return { + 'url': 'http://example.com/' + factory.make_name('path'), + 'keyring': factory.make_name('keyring'), + 'keyring_data': factory.make_string(), + 'selections': [{ + 'os': factory.make_name('os'), + 'release': factory.make_name('release'), + 'labels': [factory.make_name('label')], + 'arches': [factory.make_name('arch')], + 'subarches': [factory.make_name('sub') for _ in range(3)], + }], + } - def test_get_defaults_returns_default_config(self): - # The default configuration is production-ready. - observed = BootConfig.get_defaults() - self.assertEqual(self.default_production_config, observed) + def test_parse_parses_source(self): + sources = [self.make_source()] + self.assertEqual( + sources, + BootSources.parse(BytesIO(yaml.safe_dump(sources)))) - def test_load_example(self): - # The example configuration is designed for development. 
- filename = os.path.join(root, "etc", "maas", "bootresources.yaml") + def test_parse_parses_multiple_sources(self): + sources = [self.make_source() for _ in range(2)] self.assertEqual( - self.default_development_config, - BootConfig.load(filename)) + sources, + BootSources.parse(BytesIO(yaml.safe_dump(sources)))) + + def test_parse_uses_defaults(self): + self.assertEqual( + [self.default_source], + BootSources.parse(BytesIO(b'[{}]'))) + + def test_load_parses_file(self): + sources = [self.make_source()] + self.assertEqual( + sources, + BootSources.load(self.make_file(contents=yaml.safe_dump(sources)))) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_configure_maas_url.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_configure_maas_url.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_configure_maas_url.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_configure_maas_url.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,351 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for `MAAS_URL` configuration update code.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from argparse import ArgumentParser +from random import randint +from textwrap import dedent + +from maastesting.factory import factory +from maastesting.matchers import ( + MockAnyCall, + MockCalledOnceWith, + ) +from maastesting.testcase import MAASTestCase +from mock import ( + ANY, + Mock, + ) +from provisioningserver import configure_maas_url +from provisioningserver.configure_maas_url import substitute_pserv_yaml_line +from testtools.matchers import ( + FileContains, + StartsWith, + ) + + +class TestRewriteConfigFile(MAASTestCase): + + def test__rewrites_file(self): + path = self.make_file(contents='foo\n') + configure_maas_url.rewrite_config_file(path, lambda line: 'bar') + self.assertThat(path, FileContains('bar\n')) + + def test__sets_access_permissions(self): + writer = self.patch(configure_maas_url, 'atomic_write') + mode = 0215 + path = self.make_file() + configure_maas_url.rewrite_config_file( + path, lambda line: line, mode=mode) + self.assertThat(writer, MockCalledOnceWith(ANY, path, mode=mode)) + + def test__preserves_trailing_newline(self): + path = self.make_file(contents='x\n') + configure_maas_url.rewrite_config_file(path, lambda line: line) + self.assertThat(path, FileContains('x\n')) + + def test__ensures_trailing_newline(self): + path = self.make_file(contents='x') + configure_maas_url.rewrite_config_file(path, lambda line: line) + self.assertThat(path, FileContains('x\n')) + + +class TestUpdateMAASClusterConf(MAASTestCase): + + def patch_file(self, content): + """Inject a fake `/etc/maas/maas_cluster.conf`.""" + path = self.make_file(name='maas_cluster.conf', contents=content) + self.patch(configure_maas_url, 'MAAS_CLUSTER_CONF', path) + return path + + def test__updates_realistic_file(self): + config_file = self.patch_file(dedent("""\ + # 
Leading comments. + MAAS_URL="http://10.9.8.7/MAAS" + CLUSTER_UUID="5d02950e-6318-8195-ac3e-e6ccb12673c5" + """)) + configure_maas_url.update_maas_cluster_conf('http://1.2.3.4/MAAS') + self.assertThat( + config_file, + FileContains(dedent("""\ + # Leading comments. + MAAS_URL="http://1.2.3.4/MAAS" + CLUSTER_UUID="5d02950e-6318-8195-ac3e-e6ccb12673c5" + """))) + + def test__updates_quoted_value(self): + old_url = factory.make_url() + new_url = factory.make_url() + config_file = self.patch_file('MAAS_URL="%s"\n' % old_url) + configure_maas_url.update_maas_cluster_conf(new_url) + self.assertThat( + config_file, + FileContains('MAAS_URL="%s"\n' % new_url)) + + def test__updates_unquoted_value(self): + old_url = factory.make_url() + new_url = factory.make_url() + config_file = self.patch_file('MAAS_URL=%s\n' % old_url) + configure_maas_url.update_maas_cluster_conf(new_url) + self.assertThat( + config_file, + FileContains('MAAS_URL="%s"\n' % new_url)) + + def test__leaves_other_lines_unchanged(self): + old_content = '#MAAS_URL="%s"\n' % factory.make_url() + config_file = self.patch_file(old_content) + configure_maas_url.update_maas_cluster_conf(factory.make_url()) + self.assertThat(config_file, FileContains(old_content)) + + +class TestExtractHost(MAASTestCase): + + def test__extracts_hostname(self): + host = factory.make_name('host').lower() + port = factory.pick_port() + self.assertEqual( + host, + configure_maas_url.extract_host('http://%s/path' % host)) + self.assertEqual( + host, + configure_maas_url.extract_host('http://%s:%d' % (host, port))) + + def test__extracts_IPv4_address(self): + host = factory.make_ipv4_address() + port = factory.pick_port() + self.assertEqual( + host, + configure_maas_url.extract_host('http://%s' % host)) + self.assertEqual( + host, + configure_maas_url.extract_host('http://%s:%d' % (host, port))) + + def test__extracts_IPv6_address(self): + host = factory.make_ipv6_address() + port = factory.pick_port() + self.assertEqual( + host, + 
configure_maas_url.extract_host('http://[%s]' % host)) + self.assertEqual( + host, + configure_maas_url.extract_host('http://[%s]:%d' % (host, port))) + + def test__extracts_IPv6_address_with_zone_index(self): + host = ( + factory.make_ipv6_address() + + '%25' + + factory.make_name('zone').lower()) + port = factory.pick_port() + self.assertEqual( + host, + configure_maas_url.extract_host('http://[%s]' % host)) + self.assertEqual( + host, + configure_maas_url.extract_host('http://[%s]:%d' % (host, port))) + + +class TestSubstitutePservYamlLine(MAASTestCase): + + def make_generator_line(self, url): + return " generator: %s" % url + + def test__replaces_hostname_generator_URL(self): + old_host = factory.make_name('old-host') + new_host = factory.make_name('new-host') + input_line = self.make_generator_line('http://%s' % old_host) + self.assertEqual( + self.make_generator_line('http://%s' % new_host), + substitute_pserv_yaml_line(new_host, input_line)) + + def test__replaces_IPv4_generator_URL(self): + old_host = factory.make_ipv4_address() + new_host = factory.make_name('new-host') + input_line = self.make_generator_line('http://%s' % old_host) + self.assertEqual( + self.make_generator_line('http://%s' % new_host), + substitute_pserv_yaml_line(new_host, input_line)) + + def test__replaces_IPv6_generator_URL(self): + old_host = factory.make_ipv6_address() + new_host = factory.make_name('new-host') + input_line = self.make_generator_line('http://[%s]' % old_host) + self.assertEqual( + self.make_generator_line('http://%s' % new_host), + substitute_pserv_yaml_line(new_host, input_line)) + + def test__replaces_IPv6_generator_URL_with_zone_index(self): + old_host = ( + factory.make_ipv6_address() + + '%25' + + factory.make_name('zone') + ) + new_host = factory.make_name('new-host') + input_line = self.make_generator_line('http://[%s]' % old_host) + self.assertEqual( + self.make_generator_line('http://%s' % new_host), + substitute_pserv_yaml_line(new_host, input_line)) + + 
def test__inserts_IPv6_with_brackets(self): + old_host = factory.make_name('old-host') + new_host = '[%s]' % factory.make_ipv6_address() + input_line = self.make_generator_line('http://%s' % old_host) + self.assertEqual( + self.make_generator_line('http://%s' % new_host), + substitute_pserv_yaml_line(new_host, input_line)) + + def test__inserts_IPv6_without_brackets(self): + old_host = factory.make_name('old-host') + new_host = factory.make_ipv6_address() + input_line = self.make_generator_line('http://%s' % old_host) + self.assertEqual( + self.make_generator_line('http://[%s]' % new_host), + substitute_pserv_yaml_line(new_host, input_line)) + + def test__preserves_port_after_simple_host(self): + port = factory.pick_port() + old_host = factory.make_name('old-host') + new_host = factory.make_name('new-host') + input_line = self.make_generator_line( + 'http://%s:%d' % (old_host, port)) + self.assertEqual( + self.make_generator_line('http://%s:%d' % (new_host, port)), + substitute_pserv_yaml_line(new_host, input_line)) + + def test__preserves_port_with_IPv6(self): + port = factory.pick_port() + old_host = factory.make_ipv6_address() + new_host = factory.make_name('new-host') + input_line = self.make_generator_line( + 'http://[%s]:%d' % (old_host, port)) + self.assertEqual( + self.make_generator_line('http://%s:%d' % (new_host, port)), + substitute_pserv_yaml_line(new_host, input_line)) + + def test__preserves_port_with_IPv6_and_zone_index(self): + port = factory.pick_port() + old_host = ( + factory.make_ipv6_address() + + '%25' + + factory.make_name('zone') + ) + new_host = factory.make_name('new-host') + input_line = self.make_generator_line( + 'http://[%s]:%d' % (old_host, port)) + self.assertEqual( + self.make_generator_line('http://%s:%d' % (new_host, port)), + substitute_pserv_yaml_line(new_host, input_line)) + + def test__preserves_other_line(self): + line = '#' + self.make_generator_line(factory.make_url()) + self.assertEqual( + line, + 
substitute_pserv_yaml_line(factory.make_name('host'), line)) + + def test__preserves_indentation(self): + spaces = ' ' * randint(0, 10) + input_line = spaces + 'generator: %s' % factory.make_url() + output_line = substitute_pserv_yaml_line( + factory.make_name('host'), input_line) + self.assertThat(output_line, StartsWith(spaces + 'generator:')) + + def test__preserves_trailing_comments(self): + comment = " # Trailing comment." + old_host = factory.make_name('old-host') + new_host = factory.make_name('new-host') + input_line = self.make_generator_line('http://%s' % old_host) + comment + self.assertEqual( + self.make_generator_line('http://%s' % new_host) + comment, + substitute_pserv_yaml_line(new_host, input_line)) + + +class TestUpdatePservYaml(MAASTestCase): + + def patch_file(self, content): + """Inject a fake `/etc/maas/pserv.yaml`.""" + path = self.make_file(name='pserv.yaml', contents=content) + self.patch(configure_maas_url, 'PSERV_YAML', path) + return path + + def test__updates_realistic_file(self): + old_host = factory.make_name('old-host') + new_host = factory.make_name('new-host') + config_file = self.patch_file(dedent("""\ + ## TFTP configuration. + tftp: + ## The URL to be contacted to generate PXE configurations. + generator: http://%s/MAAS/api/1.0/pxeconfig/ + """) % old_host) + configure_maas_url.update_pserv_yaml(new_host) + self.assertThat( + config_file, + FileContains(dedent("""\ + ## TFTP configuration. + tftp: + ## The URL to be contacted to generate PXE configurations. 
+ generator: http://%s/MAAS/api/1.0/pxeconfig/ + """) % new_host)) + + +class TestAddArguments(MAASTestCase): + + def test__accepts_maas_url(self): + url = factory.make_url() + parser = ArgumentParser() + configure_maas_url.add_arguments(parser) + args = parser.parse_args([url]) + self.assertEqual(url, args.maas_url) + + +class TestRun(MAASTestCase): + + def make_args(self, maas_url): + args = Mock() + args.maas_url = maas_url + return args + + def patch_read_file(self): + return self.patch(configure_maas_url, 'read_text_file') + + def patch_write_file(self): + return self.patch(configure_maas_url, 'atomic_write') + + def test__updates_maas_cluster_conf(self): + reader = self.patch_read_file() + writer = self.patch_write_file() + url = factory.make_url() + configure_maas_url.run(self.make_args(url)) + self.assertThat(reader, MockAnyCall('/etc/maas/maas_cluster.conf')) + self.assertThat( + writer, + MockAnyCall(ANY, '/etc/maas/maas_cluster.conf', mode=0640)) + + def test__updates_pserv_yaml(self): + reader = self.patch_read_file() + writer = self.patch_write_file() + url = factory.make_url() + configure_maas_url.run(self.make_args(url)) + self.assertThat(reader, MockAnyCall('/etc/maas/pserv.yaml')) + self.assertThat( + writer, + MockAnyCall(ANY, '/etc/maas/pserv.yaml', mode=0644)) + + def test__passes_host_to_update_pserv_yaml(self): + self.patch_read_file() + self.patch_write_file() + update_pserv_yaml = self.patch(configure_maas_url, 'update_pserv_yaml') + host = factory.make_name('host').lower() + url = factory.make_url(netloc=host) + configure_maas_url.run(self.make_args(url)) + self.assertThat(update_pserv_yaml, MockCalledOnceWith(host)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_customize_config.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_customize_config.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_customize_config.py 2014-09-03 14:18:31.000000000 +0000 +++ 
maas-1.7.6+bzr3376/src/provisioningserver/tests/test_customize_config.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for customize_config.""" @@ -42,7 +42,7 @@ customize_config.run(parsed_args) def test_runs_as_script(self): - original_text = factory.getRandomString() + original_text = factory.make_string() original_file = self.make_file(original_text) script = os.path.join(bindir, "maas-provision") command = Popen( @@ -69,10 +69,10 @@ self.assertEqual(expected, output.decode('utf-8')) def test_does_not_modify_original(self): - original_text = factory.getRandomString().encode('ascii') + original_text = factory.make_string().encode('ascii') original_file = self.make_file(contents=original_text) - self.run_command(original_file, factory.getRandomString()) + self.run_command(original_file, factory.make_string()) with open(original_file, 'rb') as reread_file: contents_after = reread_file.read() diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_diskless.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_diskless.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_diskless.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_diskless.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,492 @@ +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for creating disks for diskless booting.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import os +from textwrap import dedent + +from maastesting.factory import factory +from maastesting.matchers import ( + MockCalledOnceWith, + MockNotCalled, + ) +from maastesting.testcase import MAASTestCase +from mock import sentinel +from provisioningserver import ( + config, + diskless, + ) +from provisioningserver.diskless import ( + compose_diskless_link_path, + compose_diskless_tgt_config, + compose_source_path, + create_diskless_disk, + create_diskless_link, + delete_diskless_disk, + delete_diskless_link, + DisklessError, + get_diskless_driver, + get_diskless_store, + get_diskless_target, + get_diskless_tgt_path, + read_diskless_link, + reload_diskless_tgt, + tgt_entry, + update_diskless_tgt, + ) +from provisioningserver.drivers.diskless import DisklessDriverRegistry +from provisioningserver.drivers.diskless.tests.test_base import ( + make_diskless_driver, + ) +from provisioningserver.drivers.osystem import ( + BOOT_IMAGE_PURPOSE, + OperatingSystemRegistry, + ) +from provisioningserver.testing.os import FakeOS +from provisioningserver.utils.testing import RegistryFixture +from testtools.matchers import ( + FileExists, + Not, + ) + + +class DisklessTestMixin: + """Helper mixin for diskless tests. + + Uses the RegistryFixture so the provisioningserver registry is + empty. + """ + + def setUp(self): + super(DisklessTestMixin, self).setUp() + # Ensure the global registry is empty for each test run. 
+ self.useFixture(RegistryFixture()) + + def configure_resource_storage(self): + resource_dir = self.make_dir() + os.mkdir(os.path.join(resource_dir, 'diskless')) + self.patch(config, 'BOOT_RESOURCES_STORAGE', resource_dir) + return resource_dir + + def configure_diskless_storage(self): + storage_dir = self.make_dir() + self.patch(diskless, 'get_diskless_store').return_value = storage_dir + return storage_dir + + def configure_compose_source_path(self, path=None): + if path is None: + path = self.make_file() + self.patch(diskless, 'compose_source_path').return_value = path + return path + + def make_usable_osystem_with_release(self, purposes=None): + os_name = factory.make_name('os') + release_name = factory.make_name('release') + if purposes is None: + purposes = [BOOT_IMAGE_PURPOSE.DISKLESS] + osystem = FakeOS( + os_name, purposes, releases=[release_name]) + OperatingSystemRegistry.register_item(os_name, osystem) + return os_name, release_name + + def make_usable_diskless_driver(self, name=None, description=None, + settings=None): + driver = make_diskless_driver( + name=name, description=description, settings=settings) + DisklessDriverRegistry.register_item(driver.name, driver) + return driver + + def patch_reload_diskless_tgt(self): + """Stops `reload_diskless_tgt` from running.""" + self.patch(diskless, 'reload_diskless_tgt') + + +class TestHelpers(MAASTestCase, DisklessTestMixin): + + def test_get_diskless_store(self): + storage_dir = factory.make_name('storage') + self.patch(config, 'BOOT_RESOURCES_STORAGE', storage_dir) + self.assertEqual( + os.path.join(storage_dir, 'diskless', 'store'), + get_diskless_store()) + + def test_compose_diskless_link_path(self): + system_id = factory.make_name('system_id') + storage_dir = self.configure_diskless_storage() + self.assertEqual( + os.path.join(storage_dir, system_id), + compose_diskless_link_path(system_id)) + + def test_create_diskless_link_creates_link(self): + system_id = factory.make_name('system_id') + 
storage_dir = self.configure_diskless_storage() + link_path = factory.make_name('link_path') + create_diskless_link(system_id, link_path) + self.assertEqual( + link_path, os.readlink(os.path.join(storage_dir, system_id))) + + def test_create_diskless_link_error_on_already_exists(self): + system_id = factory.make_name('system_id') + storage_dir = self.configure_diskless_storage() + factory.make_file(storage_dir, system_id) + self.assertRaises( + DisklessError, + create_diskless_link, system_id, 'link_path') + + def test_create_diskless_link_uses_lexists(self): + system_id = factory.make_name('system_id') + storage_dir = self.configure_diskless_storage() + mock_lexists = self.patch(os.path, 'lexists') + mock_lexists.return_value = False + create_diskless_link(system_id, factory.make_name('link_path')) + self.assertThat( + mock_lexists, + MockCalledOnceWith(os.path.join(storage_dir, system_id))) + + def test_delete_diskless_link_deletes_link(self): + system_id = factory.make_name('system_id') + storage_dir = self.configure_diskless_storage() + factory.make_file(storage_dir, system_id) + delete_diskless_link(system_id) + self.assertThat( + os.path.join(storage_dir, system_id), Not(FileExists())) + + def test_delete_diskless_link_uses_lexists(self): + system_id = factory.make_name('system_id') + storage_dir = self.configure_diskless_storage() + mock_lexists = self.patch(os.path, 'lexists') + mock_lexists.return_value = False + delete_diskless_link(system_id) + self.assertThat( + mock_lexists, + MockCalledOnceWith(os.path.join(storage_dir, system_id))) + + def test_read_diskless_link_returns_link_path(self): + system_id = factory.make_name('system_id') + self.configure_diskless_storage() + link_path = factory.make_name('link_path') + create_diskless_link(system_id, link_path) + self.assertEqual(link_path, read_diskless_link(system_id)) + + def test_read_diskless_link_uses_lexists(self): + system_id = factory.make_name('system_id') + storage_dir = 
self.configure_diskless_storage() + mock_lexists = self.patch(os.path, 'lexists') + mock_lexists.return_value = False + read_diskless_link(system_id) + self.assertThat( + mock_lexists, + MockCalledOnceWith(os.path.join(storage_dir, system_id))) + + def test_get_diskless_driver_returns_driver(self): + driver = self.make_usable_diskless_driver() + self.assertEqual(driver, get_diskless_driver(driver.name)) + + def test_get_diskless_driver_errors_on_missing_driver(self): + invalid_name = factory.make_name('invalid_driver') + self.assertRaises(DisklessError, get_diskless_driver, invalid_name) + + +class TestTgtHelpers(MAASTestCase, DisklessTestMixin): + + def test_get_diskless_target(self): + system_id = factory.make_name('system_id') + self.assertEqual( + 'iqn.2004-05.com.ubuntu:maas:root-diskless-%s' % system_id, + get_diskless_target(system_id)) + + def test_get_diskless_tgt_path(self): + storage_dir = self.configure_resource_storage() + self.assertEqual( + os.path.join(storage_dir, 'diskless', 'maas-diskless.tgt'), + get_diskless_tgt_path()) + + def test_tgt_entry(self): + system_id = factory.make_name('system_id') + image_path = factory.make_name('image_path') + expected_entry = dedent("""\ + + readonly 0 + backing-store "{image}" + driver iscsi + + """).format(system_id=system_id, image=image_path) + self.assertEqual( + expected_entry, + tgt_entry(system_id, image_path)) + + def test_compose_diskless_tgt_config(self): + storage_dir = self.configure_diskless_storage() + system_ids = [factory.make_name('system_id') for _ in range(3)] + tgt_entries = [] + for system_id in system_ids: + factory.make_file(storage_dir, system_id) + tgt_entries.append( + tgt_entry(system_id, os.path.join(storage_dir, system_id))) + tgt_output = compose_diskless_tgt_config() + self.assertItemsEqual( + tgt_entries, [ + '%s\n' % entry + for entry in tgt_output.split('\n') + if entry != "" + ]) + + def test_reload_diskless_tgt(self): + tgt_path = factory.make_name('tgt_path') + 
self.patch(diskless, 'get_diskless_tgt_path').return_value = tgt_path + mock_call = self.patch(diskless, 'call_and_check') + reload_diskless_tgt() + self.assertThat( + mock_call, + MockCalledOnceWith([ + 'sudo', + '/usr/sbin/tgt-admin', + '--conf', tgt_path, + '--update', 'ALL', + ])) + + def test_update_diskless_tgt_calls_atomic_write(self): + tgt_path = factory.make_name('tgt_path') + self.patch( + diskless, 'get_diskless_tgt_path').return_value = tgt_path + tgt_config = factory.make_name('tgt_config') + self.patch( + diskless, 'compose_diskless_tgt_config').return_value = tgt_config + mock_write = self.patch(diskless, 'atomic_write') + self.patch_reload_diskless_tgt() + update_diskless_tgt() + self.assertThat( + mock_write, + MockCalledOnceWith(tgt_config, tgt_path, mode=0644)) + + +class TestComposeSourcePath(MAASTestCase, DisklessTestMixin): + + def test__raises_error_on_missing_os_from_registry(self): + self.assertRaises( + DisklessError, + compose_source_path, factory.make_name('osystem'), sentinel.arch, + sentinel.subarch, sentinel.release, sentinel.label) + + def test__raises_error_when_os_doesnt_support_diskless(self): + osystem, release = self.make_usable_osystem_with_release( + purposes=[BOOT_IMAGE_PURPOSE.XINSTALL]) + self.assertRaises( + DisklessError, + compose_source_path, osystem, sentinel.arch, sentinel.subarch, + release, sentinel.label) + + def test__returns_valid_path(self): + os_name, release = self.make_usable_osystem_with_release() + arch = factory.make_name('arch') + subarch = factory.make_name('subarch') + label = factory.make_name('label') + root_path = factory.make_name('root_path') + osystem = OperatingSystemRegistry[os_name] + mock_xi_params = self.patch(osystem, 'get_xinstall_parameters') + mock_xi_params.return_value = (root_path, 'tgz') + self.assertEqual( + os.path.join( + config.BOOT_RESOURCES_STORAGE, 'current', os_name, + arch, subarch, release, label, root_path), + compose_source_path(os_name, arch, subarch, release, label)) + 
+ +class TestCreateDisklessDisk(MAASTestCase, DisklessTestMixin): + + def test__raises_error_on_doesnt_exist_source_path(self): + self.configure_compose_source_path(factory.make_name('invalid_path')) + self.assertRaises( + DisklessError, + create_diskless_disk, sentinel.driver, sentinel.driver_options, + sentinel.system_id, sentinel.osystem, sentinel.arch, + sentinel.subarch, sentinel.release, sentinel.label) + + def test__raises_error_on_link_already_exists(self): + self.configure_diskless_storage() + self.configure_compose_source_path() + system_id = factory.make_name('system_id') + create_diskless_link(system_id, factory.make_name('link_path')) + self.assertRaises( + DisklessError, + create_diskless_disk, sentinel.driver, sentinel.driver_options, + system_id, sentinel.osystem, sentinel.arch, + sentinel.subarch, sentinel.release, sentinel.label) + + def test__calls_create_disk_on_driver(self): + self.patch_reload_diskless_tgt() + self.configure_resource_storage() + self.configure_diskless_storage() + source_path = self.configure_compose_source_path() + driver = self.make_usable_diskless_driver() + mock_create = self.patch(driver, 'create_disk') + mock_create.return_value = self.make_file() + system_id = factory.make_name('system_id') + driver_options = { + factory.make_name('arg'): factory.make_name('value') + for _ in range(3) + } + create_diskless_disk( + driver.name, driver_options, + system_id, sentinel.osystem, sentinel.arch, + sentinel.subarch, sentinel.release, sentinel.label) + self.assertThat( + mock_create, + MockCalledOnceWith(system_id, source_path, **driver_options)) + + def test__errors_when_driver_create_disk_returns_None(self): + self.patch_reload_diskless_tgt() + self.configure_resource_storage() + self.configure_diskless_storage() + self.configure_compose_source_path() + driver = self.make_usable_diskless_driver() + mock_create = self.patch(driver, 'create_disk') + mock_create.return_value = None + system_id = factory.make_name('system_id') + 
self.assertRaises( + DisklessError, + create_diskless_disk, driver.name, {}, + system_id, sentinel.osystem, sentinel.arch, + sentinel.subarch, sentinel.release, sentinel.label) + + def test__errors_when_driver_create_disk_returns_invalid_path(self): + self.patch_reload_diskless_tgt() + self.configure_resource_storage() + self.configure_diskless_storage() + self.configure_compose_source_path() + driver = self.make_usable_diskless_driver() + mock_create = self.patch(driver, 'create_disk') + mock_create.return_value = factory.make_name('invalid_path') + system_id = factory.make_name('system_id') + self.assertRaises( + DisklessError, + create_diskless_disk, driver.name, {}, + system_id, sentinel.osystem, sentinel.arch, + sentinel.subarch, sentinel.release, sentinel.label) + + def test__creates_diskless_link(self): + self.patch_reload_diskless_tgt() + self.configure_resource_storage() + self.configure_diskless_storage() + self.configure_compose_source_path() + driver = self.make_usable_diskless_driver() + create_file = self.make_file() + mock_create = self.patch(driver, 'create_disk') + mock_create.return_value = create_file + system_id = factory.make_name('system_id') + create_diskless_disk( + driver.name, {}, + system_id, sentinel.osystem, sentinel.arch, + sentinel.subarch, sentinel.release, sentinel.label) + self.assertEqual(create_file, read_diskless_link(system_id)) + + def test__calls_update_diskless_tgt(self): + self.configure_resource_storage() + self.configure_diskless_storage() + self.configure_compose_source_path() + driver = self.make_usable_diskless_driver() + mock_create = self.patch(driver, 'create_disk') + mock_create.return_value = self.make_file() + system_id = factory.make_name('system_id') + mock_update_tgt = self.patch(diskless, 'update_diskless_tgt') + create_diskless_disk( + driver.name, {}, + system_id, sentinel.osystem, sentinel.arch, + sentinel.subarch, sentinel.release, sentinel.label) + self.assertThat(mock_update_tgt, MockCalledOnceWith()) + 
+ +class TestDeleteDisklessDisk(MAASTestCase, DisklessTestMixin): + + def test__exits_early_on_missing_link(self): + self.configure_diskless_storage() + system_id = factory.make_name('system_id') + # if read_diskless_link is called then, did not exit early + mock_read_link = self.patch(diskless, 'read_diskless_link') + delete_diskless_disk( + sentinel.driver, sentinel.driver_options, system_id) + self.assertThat(mock_read_link, MockNotCalled()) + + def test__checks_for_link_using_lexists(self): + self.configure_diskless_storage() + system_id = factory.make_name('system_id') + mock_lexists = self.patch(os.path, 'lexists') + mock_lexists.return_value = False + delete_diskless_disk( + sentinel.driver, sentinel.driver_options, system_id) + self.assertThat( + mock_lexists, + MockCalledOnceWith(compose_diskless_link_path(system_id))) + + def test__raises_error_if_read_diskless_link_returns_None(self): + self.configure_diskless_storage() + system_id = factory.make_name('system_id') + create_diskless_link(system_id, factory.make_name('link')) + self.patch(diskless, 'read_diskless_link').return_value = None + self.assertRaises( + DisklessError, delete_diskless_disk, + sentinel.driver, sentinel.driver_options, system_id) + + def test__calls_delete_disk_on_driver_when_link_points_to_valid_path(self): + self.patch_reload_diskless_tgt() + self.configure_resource_storage() + self.configure_diskless_storage() + system_id = factory.make_name('system_id') + link_path = self.make_file() + create_diskless_link(system_id, link_path) + driver = self.make_usable_diskless_driver() + mock_delete = self.patch(driver, 'delete_disk') + driver_options = { + factory.make_name('arg'): factory.make_name('value') + for _ in range(3) + } + delete_diskless_disk(driver.name, driver_options, system_id) + self.assertThat( + mock_delete, + MockCalledOnceWith(system_id, link_path, **driver_options)) + + def test__doenst_call_delete_disk_on_driver_when_link_is_invalid(self): + 
self.patch_reload_diskless_tgt() + self.configure_resource_storage() + self.configure_diskless_storage() + system_id = factory.make_name('system_id') + create_diskless_link(system_id, factory.make_name('link')) + driver = self.make_usable_diskless_driver() + mock_delete = self.patch(driver, 'delete_disk') + delete_diskless_disk(driver.name, {}, system_id) + self.assertThat(mock_delete, MockNotCalled()) + + def test__deletes_diskless_link(self): + self.patch_reload_diskless_tgt() + self.configure_resource_storage() + storage_dir = self.configure_diskless_storage() + system_id = factory.make_name('system_id') + create_diskless_link(system_id, self.make_file()) + driver = self.make_usable_diskless_driver() + self.patch(driver, 'delete_disk') + delete_diskless_disk(driver.name, {}, system_id) + self.assertThat( + os.path.join(storage_dir, system_id), Not(FileExists())) + + def test__calls_update_diskless_tgt(self): + self.configure_resource_storage() + self.configure_diskless_storage() + system_id = factory.make_name('system_id') + create_diskless_link(system_id, self.make_file()) + driver = self.make_usable_diskless_driver() + self.patch(driver, 'delete_disk') + mock_update_tgt = self.patch(diskless, 'update_diskless_tgt') + delete_diskless_disk(driver.name, {}, system_id) + self.assertThat(mock_update_tgt, MockCalledOnceWith()) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_events.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_events.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_events.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_events.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,184 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Test event catalog.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + ] + +import random + +from maastesting.factory import factory +from maastesting.testcase import ( + MAASTestCase, + MAASTwistedRunTest, + ) +from mock import ( + ANY, + call, + ) +from provisioningserver.events import ( + EVENT_DETAILS, + EVENT_TYPES, + EventDetail, + send_event_node, + send_event_node_mac_address, + ) +from provisioningserver.rpc import region +from provisioningserver.rpc.exceptions import NoSuchEventType +from provisioningserver.rpc.testing import MockLiveClusterToRegionRPCFixture +from provisioningserver.utils.enum import map_enum +from testtools.matchers import ( + AllMatch, + IsInstance, + ) +from twisted.internet.defer import inlineCallbacks + + +class TestEvents(MAASTestCase): + + def test_every_event_has_details(self): + all_events = map_enum(EVENT_TYPES) + self.assertItemsEqual(all_events.values(), EVENT_DETAILS) + self.assertThat( + EVENT_DETAILS.values(), AllMatch(IsInstance(EventDetail))) + + +class TestSendEvent(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def patch_rpc_methods(self, side_effect=None): + fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) + protocol, connecting = fixture.makeEventLoop( + region.SendEvent, region.RegisterEventType) + protocol.SendEvent.side_effect = side_effect + return protocol, connecting + + @inlineCallbacks + def test_send_event_node_stores_event(self): + protocol, connecting = self.patch_rpc_methods() + self.addCleanup((yield connecting)) + + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + description = factory.make_name('description') + event_name = random.choice(map_enum(EVENT_TYPES).keys()) + + yield send_event_node( + event_name, system_id, hostname, description) + self.assertEquals( + [call( + ANY, type_name=event_name, 
system_id=system_id, + description=description, + )], + protocol.SendEvent.call_args_list, + ) + + @inlineCallbacks + def test_send_event_node_registers_event_type(self): + protocol, connecting = self.patch_rpc_methods( + side_effect=[NoSuchEventType, {}]) + self.addCleanup((yield connecting)) + + system_id = factory.make_name('system_id') + hostname = factory.make_name('hostname') + description = factory.make_name('description') + event_name = random.choice(map_enum(EVENT_TYPES).keys()) + + yield send_event_node(event_name, system_id, hostname, description) + event_detail = EVENT_DETAILS[event_name] + self.assertEquals( + [ + call( + ANY, type_name=event_name, system_id=system_id, + description=description), + call( + ANY, type_name=event_name, system_id=system_id, + description=description), + ], + protocol.SendEvent.call_args_list, + ) + self.assertEquals( + [ + call( + ANY, name=event_name, + description=event_detail.description, + level=event_detail.level), + ], + protocol.RegisterEventType.call_args_list, + ) + + +class TestSendEventMACAddress(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def patch_rpc_methods(self, side_effect=None): + fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) + protocol, connecting = fixture.makeEventLoop( + region.SendEventMACAddress, region.RegisterEventType) + protocol.SendEventMACAddress.side_effect = side_effect + return protocol, connecting + + @inlineCallbacks + def test_send_event_node_mac_address_stores_event(self): + protocol, connecting = self.patch_rpc_methods() + self.addCleanup((yield connecting)) + + mac_address = factory.make_mac_address() + description = factory.make_name('description') + event_name = random.choice(map_enum(EVENT_TYPES).keys()) + + yield send_event_node_mac_address( + event_name, mac_address, description) + self.assertEquals( + [call( + ANY, type_name=event_name, mac_address=mac_address, + description=description, + )], + 
protocol.SendEventMACAddress.call_args_list, + ) + + @inlineCallbacks + def test_send_event_node_mac_address_registers_event_type(self): + protocol, connecting = self.patch_rpc_methods( + side_effect=[NoSuchEventType, {}]) + self.addCleanup((yield connecting)) + + mac_address = factory.make_mac_address() + description = factory.make_name('description') + event_name = random.choice(map_enum(EVENT_TYPES).keys()) + + yield send_event_node_mac_address( + event_name, mac_address, description) + event_detail = EVENT_DETAILS[event_name] + self.assertEquals( + [ + call( + ANY, type_name=event_name, mac_address=mac_address, + description=description), + call( + ANY, type_name=event_name, mac_address=mac_address, + description=description), + ], + protocol.SendEventMACAddress.call_args_list, + ) + self.assertEquals( + [ + call( + ANY, name=event_name, + description=event_detail.description, + level=event_detail.level), + ], + protocol.RegisterEventType.call_args_list, + ) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_kernel_opts.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_kernel_opts.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_kernel_opts.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_kernel_opts.py 2015-07-10 01:27:14.000000000 +0000 @@ -21,7 +21,7 @@ from maastesting.factory import factory from maastesting.testcase import MAASTestCase from provisioningserver import kernel_opts -from provisioningserver.driver import ( +from provisioningserver.drivers import ( Architecture, ArchitectureRegistry, ) @@ -228,7 +228,8 @@ # options for a "xinstall" node. 
params = self.make_kernel_parameters(purpose="xinstall") ephemeral_name = get_ephemeral_name( - params.arch, params.subarch, params.release, params.label) + params.osystem, params.arch, params.subarch, + params.release, params.label) self.assertThat( compose_kernel_command_line(params), ContainsAll([ @@ -243,7 +244,8 @@ # options for a "commissioning" node. params = self.make_kernel_parameters(purpose="commissioning") ephemeral_name = get_ephemeral_name( - params.arch, params.subarch, params.release, params.label) + params.osystem, params.arch, params.subarch, + params.release, params.label) self.assertThat( compose_kernel_command_line(params), ContainsAll([ diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_network.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_network.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_network.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_network.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the `network` module.""" @@ -16,20 +16,31 @@ from maastesting.factory import factory from maastesting.testcase import MAASTestCase -from netaddr import IPNetwork -from netifaces import AF_INET +from netaddr import ( + IPAddress, + IPNetwork, + ) +from netifaces import ( + AF_INET, + AF_INET6, + ) from provisioningserver import network +from testtools.matchers import HasLength def make_inet_address(subnet=None): - """Fake an AF_INET address.""" + """Fake an `AF_INET` or `AF_INET6` address.""" if subnet is None: - subnet = factory.getRandomNetwork() - return { - 'broadcast': subnet.broadcast, - 'netmask': subnet.netmask, - 'addr': factory.getRandomIPInNetwork(subnet), + subnet = factory.make_ipv4_network() + subnet = IPNetwork(subnet) + addr = { + 'netmask': unicode(subnet.netmask), + 'addr': factory.pick_ip_in_network(subnet), } + if subnet.version == 4: + # IPv4 addresses also have a broadcast field. + addr['broadcast'] = subnet.broadcast + return addr def make_loopback(): @@ -41,7 +52,12 @@ """Minimally fake up an interface definition as returned by netifaces.""" if inet_address is None: inet_address = make_inet_address() - return {AF_INET: [inet_address]} + addr = inet_address.get('addr') + if addr is None or IPAddress(addr).version == 4: + address_family = AF_INET + else: + address_family = AF_INET6 + return {address_family: [inet_address]} class TestNetworks(MAASTestCase): @@ -56,15 +72,25 @@ self.patch( network, 'ifaddresses', lambda interface: interfaces[interface]) + def reveal_IPv6(self, reveal=True): + """Enable or disable IPv6 discovery.""" + self.patch(network, 'REVEAL_IPv6', reveal) + def test_discover_networks_ignores_interface_without_IP_address(self): self.patch_netifaces({factory.make_name('eth'): {}}) self.assertEqual([], network.discover_networks()) - def test_discover_networks_ignores_loopback(self): + def test_discover_networks_ignores_IPv4_loopback(self): self.patch_netifaces({'lo': make_interface(make_loopback())}) 
self.assertEqual([], network.discover_networks()) - def test_discover_networks_represents_interface(self): + def test_discover_networks_ignores_IPv6_loopback(self): + self.reveal_IPv6(True) + self.patch_netifaces( + {'lo': make_interface(make_inet_address('::1/128'))}) + self.assertEqual([], network.discover_networks()) + + def test_discover_networks_discovers_IPv4_network(self): eth = factory.make_name('eth') interface = make_interface() self.patch_netifaces({eth: interface}) @@ -75,6 +101,26 @@ }], network.discover_networks()) + def test_discover_networks_discovers_IPv6_network_if_revealed(self): + self.reveal_IPv6(True) + eth = factory.make_name('eth') + addr = make_inet_address(factory.make_ipv6_network()) + interface = make_interface(addr) + self.patch_netifaces({eth: interface}) + self.assertEqual([{ + 'interface': eth, + 'ip': addr['addr'], + 'subnet_mask': addr['netmask'], + }], + network.discover_networks()) + + def test_discover_networks_ignores_IPv6_network_if_not_revealed(self): + self.reveal_IPv6(False) + addr = make_inet_address(factory.make_ipv6_network()) + interface = make_interface(addr) + self.patch_netifaces({factory.make_name('eth'): interface}) + self.assertEqual([], network.discover_networks()) + def test_discover_networks_returns_suitable_interfaces(self): eth = factory.make_name('eth') self.patch_netifaces({ @@ -87,6 +133,104 @@ interface['interface'] for interface in network.discover_networks()]) + def test_discover_networks_coalesces_networks_on_interface(self): + self.reveal_IPv6(True) + eth = factory.make_name('eth') + net = factory.make_ipv6_network() + self.patch_netifaces({ + eth: { + AF_INET6: [ + make_inet_address(net), + make_inet_address(net), + ], + }, + }) + interfaces = network.discover_networks() + self.assertThat(interfaces, HasLength(1)) + [interface] = interfaces + self.assertEqual(eth, interface['interface']) + self.assertIn(IPAddress(interface['ip']), net) + + def 
test_discover_networks_discovers_multiple_networks_per_interface(self): + self.reveal_IPv6(True) + eth = factory.make_name('eth') + net1 = factory.make_ipv6_network() + net2 = factory.make_ipv6_network(disjoint_from=[net1]) + addr1 = factory.pick_ip_in_network(net1) + addr2 = factory.pick_ip_in_network(net2) + self.patch_netifaces({ + eth: { + AF_INET6: [ + make_inet_address(addr1), + make_inet_address(addr2), + ], + }, + }) + interfaces = network.discover_networks() + self.assertThat(interfaces, HasLength(2)) + self.assertEqual( + [eth, eth], + [interface['interface'] for interface in interfaces]) + self.assertItemsEqual( + [addr1, addr2], + [interface['ip'] for interface in interfaces]) + + def test_discover_networks_discovers_IPv4_and_IPv6_on_same_interface(self): + self.reveal_IPv6(True) + eth = factory.make_name('eth') + ipv4_net = factory.make_ipv4_network() + ipv6_net = factory.make_ipv6_network() + ipv4_addr = factory.pick_ip_in_network(ipv4_net) + ipv6_addr = factory.pick_ip_in_network(ipv6_net) + self.patch_netifaces({ + eth: { + AF_INET: [make_inet_address(ipv4_addr)], + AF_INET6: [make_inet_address(ipv6_addr)], + }, + }) + interfaces = network.discover_networks() + self.assertThat(interfaces, HasLength(2)) + self.assertEqual( + [eth, eth], + [interface['interface'] for interface in interfaces]) + self.assertItemsEqual( + [ipv4_addr, ipv6_addr], + [interface['ip'] for interface in interfaces]) + + def test_discover_networks_ignores_link_local_IPv4_addresses(self): + interface = factory.make_name('eth') + ip = factory.pick_ip_in_network(IPNetwork('169.254.0.0/16')) + self.patch_netifaces({interface: {AF_INET: [make_inet_address(ip)]}}) + self.assertEqual([], network.discover_networks()) + + def test_discover_networks_ignores_link_local_IPv6_addresses(self): + interface = factory.make_name('eth') + ip = factory.pick_ip_in_network(IPNetwork('fe80::/10')) + self.patch(network, 'REVEAL_IPv6', True) + self.patch_netifaces({interface: {AF_INET6: 
[make_inet_address(ip)]}}) + self.assertEqual([], network.discover_networks()) + def test_discover_networks_runs_in_real_life(self): + self.reveal_IPv6(True) interfaces = network.discover_networks() self.assertIsInstance(interfaces, list) + + def test_filter_unique_networks_returns_networks(self): + net = network.AttachedNetwork('eth0', '10.1.1.1', '255.255.255.0') + self.assertEqual([net], network.filter_unique_networks([net])) + + def test_filter_unique_networks_drops_redundant_networks(self): + entry1 = network.AttachedNetwork('eth0', '10.1.1.1', '255.255.255.0') + entry2 = network.AttachedNetwork('eth0', '10.1.1.2', '255.255.255.0') + networks = network.filter_unique_networks([entry1, entry2]) + self.assertThat(networks, HasLength(1)) + self.assertIn(networks[0], [entry1, entry2]) + + def test_filter_unique_networks_orders_consistently(self): + networks = [ + network.AttachedNetwork('eth1', '10.1.1.1', '255.255.255.0'), + network.AttachedNetwork('eth2', '10.2.2.2', '255.255.255.0'), + ] + self.assertEqual( + network.filter_unique_networks(networks), + network.filter_unique_networks(reversed(networks))) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_omshell.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_omshell.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_omshell.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_omshell.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,282 +0,0 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for the omshell.py file.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from itertools import product -import os -import subprocess -import tempfile -from textwrap import dedent - -from maastesting.factory import factory -from maastesting.fakemethod import FakeMethod -from maastesting.fixtures import TempDirectory -from maastesting.testcase import MAASTestCase -from mock import ( - ANY, - Mock, - ) -from provisioningserver import omshell -import provisioningserver.omshell -from provisioningserver.omshell import ( - call_dnssec_keygen, - generate_omapi_key, - Omshell, - ) -from provisioningserver.utils import ExternalProcessError -from testtools.matchers import ( - EndsWith, - MatchesStructure, - ) - - -class TestOmshell(MAASTestCase): - - def test_initialisation(self): - server_address = factory.getRandomString() - shared_key = factory.getRandomString() - shell = Omshell(server_address, shared_key) - self.assertThat( - shell, MatchesStructure.byEquality( - server_address=server_address, - shared_key=shared_key)) - - def test_create_calls_omshell_correctly(self): - server_address = factory.getRandomString() - shared_key = factory.getRandomString() - ip_address = factory.getRandomIPAddress() - mac_address = factory.getRandomMACAddress() - shell = Omshell(server_address, shared_key) - - # Instead of calling a real omshell, we'll just record the - # parameters passed to Popen. 
- recorder = FakeMethod(result=(0, "hardware-type")) - shell._run = recorder - - shell.create(ip_address, mac_address) - - expected_script = dedent("""\ - server {server} - key omapi_key {key} - connect - new host - set ip-address = {ip} - set hardware-address = {mac} - set hardware-type = 1 - set name = "{ip}" - create - """) - expected_script = expected_script.format( - server=server_address, key=shared_key, ip=ip_address, - mac=mac_address) - - # Check that the 'stdin' arg contains the correct set of - # commands. - self.assertEqual( - [1, (expected_script,)], - [recorder.call_count, recorder.extract_args()[0]]) - - def test_create_raises_when_omshell_fails(self): - # If the call to omshell doesn't result in output containing the - # magic string 'hardware-type' it means the set of commands - # failed. - - server_address = factory.getRandomString() - shared_key = factory.getRandomString() - ip_address = factory.getRandomIPAddress() - mac_address = factory.getRandomMACAddress() - shell = Omshell(server_address, shared_key) - - # Fake a call that results in a failure with random output. - random_output = factory.getRandomString() - recorder = FakeMethod(result=(0, random_output)) - shell._run = recorder - - exc = self.assertRaises( - ExternalProcessError, shell.create, ip_address, mac_address) - self.assertEqual(random_output, exc.output) - - def test_create_succeeds_when_host_map_already_exists(self): - # To omshell, creating the same host map twice is an error. But - # Omshell.create swallows the error and makes it look like - # success. - params = { - 'ip': factory.getRandomIPAddress(), - 'mac': factory.getRandomMACAddress(), - 'hostname': factory.make_name('hostname') - } - shell = Omshell(factory.make_name('server'), factory.make_name('key')) - # This is the kind of error output we get if a host map has - # already been created. 
- error_output = dedent("""\ - obj: host - ip-address = %(ip)s - hardware-address = %(mac)s - name = "%(hostname)s" - > - can't open object: I/O error - obj: host - ip-address = %(ip)s - hardware-address = %(mac)s - name = "%(hostname)s" - """) % params - shell._run = Mock(return_value=(0, error_output)) - shell.create(params['ip'], params['mac']) - # The test is that we get here without error. - pass - - def test_remove_calls_omshell_correctly(self): - server_address = factory.getRandomString() - shared_key = factory.getRandomString() - ip_address = factory.getRandomIPAddress() - shell = Omshell(server_address, shared_key) - - # Instead of calling a real omshell, we'll just record the - # parameters passed to Popen. - recorder = FakeMethod(result=(0, "thing1\nthing2\nobj: ")) - shell._run = recorder - - shell.remove(ip_address) - - expected_script = dedent("""\ - server {server} - key omapi_key {key} - connect - new host - set name = "{ip}" - open - remove - """) - expected_script = expected_script.format( - server=server_address, key=shared_key, ip=ip_address) - - # Check that the 'stdin' arg contains the correct set of - # commands. - self.assertEqual([(expected_script,)], recorder.extract_args()) - - def test_remove_raises_when_omshell_fails(self): - # If the call to omshell doesn't result in output ending in the - # text 'obj: ' we can be fairly sure this operation - # failed. - server_address = factory.getRandomString() - shared_key = factory.getRandomString() - ip_address = factory.getRandomIPAddress() - shell = Omshell(server_address, shared_key) - - # Fake a call that results in a failure with random output. 
- random_output = factory.getRandomString() - recorder = FakeMethod(result=(0, random_output)) - shell._run = recorder - - exc = self.assertRaises( - subprocess.CalledProcessError, shell.remove, ip_address) - self.assertEqual(random_output, exc.output) - - def test_remove_works_when_extraneous_blank_last_lines(self): - # Sometimes omshell puts blank lines after the 'obj: ' so - # we need to test that the code still works if that's the case. - server_address = factory.getRandomString() - shared_key = factory.getRandomString() - ip_address = factory.getRandomIPAddress() - shell = Omshell(server_address, shared_key) - - # Fake a call that results in a something with our special output. - output = "\nobj: \n\n" - self.patch(shell, '_run').return_value = (0, output) - self.assertIsNone(shell.remove(ip_address)) - - -class Test_generate_omapi_key(MAASTestCase): - """Tests for omshell.generate_omapi_key""" - - def test_generate_omapi_key_returns_a_key(self): - key = generate_omapi_key() - # Could test for != None here, but the keys end in == for a 512 - # bit length key, so that's a better check that the script was - # actually run and produced output. - self.assertThat(key, EndsWith("==")) - - def test_generate_omapi_key_leaves_no_temp_files(self): - tmpdir = self.useFixture(TempDirectory()).path - # Make mkdtemp() in omshell nest all directories within tmpdir. 
- self.patch(tempfile, 'tempdir', tmpdir) - generate_omapi_key() - self.assertEqual([], os.listdir(tmpdir)) - - def test_generate_omapi_key_raises_assertionerror_on_no_output(self): - self.patch(omshell, 'call_dnssec_keygen', FakeMethod()) - self.assertRaises(AssertionError, generate_omapi_key) - - def test_generate_omapi_key_raises_assertionerror_on_bad_output(self): - def returns_junk(tmpdir): - key_name = factory.getRandomString() - factory.make_file(tmpdir, "%s.private" % key_name) - return key_name - - self.patch(omshell, 'call_dnssec_keygen', returns_junk) - self.assertRaises(AssertionError, generate_omapi_key) - - def test_run_repeated_keygen(self): - bad_patterns = { - "+no", "/no", "no+", "no/", - "+NO", "/NO", "NO+", "NO/", - } - bad_patterns_templates = { - "foo%sbar", "one\ntwo\n%s\nthree\n", "%s", - } - # Test that a known bad key is ignored and we generate a new one - # to replace it. - bad_keys = { - # This key is known to fail with omshell. - "YXY5pr+No/8NZeodSd27wWbI8N6kIjMF/nrnFIlPwVLuByJKkQcBRtfDrD" - "LLG2U9/ND7/bIlJxEGTUnyipffHQ==", - } - # Fabricate a range of keys containing the known-bad pattern. - bad_keys.update( - template % pattern for template, pattern in product( - bad_patterns_templates, bad_patterns)) - # An iterator that we can exhaust without mutating bad_keys. - iter_bad_keys = iter(bad_keys) - # Reference to the original parse_key_value_file, before we patch. - parse_key_value_file = provisioningserver.omshell.parse_key_value_file - - # Patch parse_key_value_file to return each of the known-bad keys - # we've created, followed by reverting to its usual behaviour. - def side_effect(*args, **kwargs): - try: - return {'Key': next(iter_bad_keys)} - except StopIteration: - return parse_key_value_file(*args, **kwargs) - - mock = self.patch(provisioningserver.omshell, 'parse_key_value_file') - mock.side_effect = side_effect - - # generate_omapi_key() does not return a key known to be bad. 
- self.assertNotIn(generate_omapi_key(), bad_keys) - - -class TestCallDnsSecKeygen(MAASTestCase): - """Tests for omshell.call_dnssec_keygen.""" - - def test_runs_external_script(self): - check_output = self.patch(subprocess, 'check_output') - target_dir = self.make_dir() - path = os.environ.get("PATH", "").split(os.pathsep) - path.append("/usr/sbin") - call_dnssec_keygen(target_dir) - check_output.assert_called_once_with( - ['dnssec-keygen', '-r', '/dev/urandom', '-a', 'HMAC-MD5', - '-b', '512', '-n', 'HOST', '-K', target_dir, '-q', 'omapi_key'], - env=ANY) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_path.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_path.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_path.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_path.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,66 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for filesystem paths.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from os import getcwdu +import os.path + +from fixtures import EnvironmentVariableFixture +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from provisioningserver.path import get_path +from testtools.matchers import StartsWith + + +class TestGetPath(MAASTestCase): + + def set_root(self, root_path=None): + """For the duration of this test, set the `MAAS_ROOT` variable`.""" + self.useFixture(EnvironmentVariableFixture('MAAS_ROOT', root_path)) + + def test__defaults_to_root(self): + self.set_root() + self.assertEqual('/', get_path()) + + def test__appends_path_elements(self): + self.set_root() + part1 = factory.make_name('dir') + part2 = factory.make_name('file') + self.assertEqual( + os.path.join('/', part1, part2), + get_path(part1, part2)) + + def test__obeys_MAAS_ROOT_variable(self): + root = factory.make_name('/root') + self.set_root(root) + path = factory.make_name('path') + self.assertEqual(os.path.join(root, path), get_path(path)) + + def test__returns_absolute_path(self): + self.set_root('.') + self.assertThat(get_path(), StartsWith('/')) + self.assertEqual(getcwdu(), get_path()) + + def test__concatenates_despite_leading_slash(self): + root = self.make_dir() + self.set_root(root) + filename = factory.make_name('file') + self.assertEqual( + os.path.join(root, filename), + get_path('/' + filename)) + + def test__normalises(self): + self.set_root() + self.assertEqual('/foo/bar', get_path('foo///szut//..///bar//')) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_plugin.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_plugin.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_plugin.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_plugin.py 2015-07-10 01:27:14.000000000 
+0000 @@ -18,21 +18,32 @@ import os from maastesting.factory import factory -from maastesting.testcase import MAASTestCase +from maastesting.testcase import ( + MAASTestCase, + MAASTwistedRunTest, + ) +import provisioningserver +from provisioningserver import plugin as plugin_module from provisioningserver.plugin import ( Options, ProvisioningRealm, ProvisioningServiceMaker, SingleUsernamePasswordChecker, ) -from provisioningserver.tftp import ( +from provisioningserver.pserv_services.dhcp_probe_service import ( + DHCPProbeService, + ) +from provisioningserver.pserv_services.image_download_service import ( + ImageDownloadService, + ) +from provisioningserver.pserv_services.node_power_monitor_service import ( + NodePowerMonitorService, + ) +from provisioningserver.pserv_services.tftp import ( TFTPBackend, TFTPService, ) -from testtools.deferredruntest import ( - assert_fails_with, - AsynchronousDeferredRunTest, - ) +from testtools.deferredruntest import assert_fails_with from testtools.matchers import ( AfterPreprocessing, Equals, @@ -75,11 +86,15 @@ class TestProvisioningServiceMaker(MAASTestCase): """Tests for `provisioningserver.plugin.ProvisioningServiceMaker`.""" - run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=5) + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def setUp(self): super(TestProvisioningServiceMaker, self).setUp() + self.patch(provisioningserver, "services", MultiService()) self.tempdir = self.make_dir() + cluster_uuid = factory.make_UUID() + self.patch(plugin_module, 'get_cluster_uuid').return_value = ( + cluster_uuid) def write_config(self, config): config_filename = os.path.join(self.tempdir, "config.yaml") @@ -101,30 +116,39 @@ service_maker = ProvisioningServiceMaker("Harry", "Hill") service = service_maker.makeService(options) self.assertIsInstance(service, MultiService) - self.assertSequenceEqual( - ["log", "oops", "rpc", "tftp"], - sorted(service.namedServices)) + expected_services = [ + "dhcp_probe", 
"image_download", "lease_upload", "log", + "node_monitor", "rpc", "tftp", + ] + self.assertItemsEqual(expected_services, service.namedServices) self.assertEqual( len(service.namedServices), len(service.services), "Not all services are named.") + self.assertEqual(service, provisioningserver.services) - def test_makeService_with_broker(self): - """ - The log, oops, site, and amqp services are created when the broker - user and password options are given. - """ + def test_image_download_service(self): options = Options() - options["config-file"] = self.write_config( - {"broker": {"username": "Bob", "password": "Hoskins"}}) + options["config-file"] = self.write_config({}) service_maker = ProvisioningServiceMaker("Harry", "Hill") service = service_maker.makeService(options) - self.assertIsInstance(service, MultiService) - self.assertSequenceEqual( - ["amqp", "log", "oops", "rpc", "tftp"], - sorted(service.namedServices)) - self.assertEqual( - len(service.namedServices), len(service.services), - "Not all services are named.") + image_service = service.getServiceNamed("image_download") + self.assertIsInstance(image_service, ImageDownloadService) + + def test_node_monitor_service(self): + options = Options() + options["config-file"] = self.write_config({}) + service_maker = ProvisioningServiceMaker("Harry", "Hill") + service = service_maker.makeService(options) + node_monitor = service.getServiceNamed("node_monitor") + self.assertIsInstance(node_monitor, NodePowerMonitorService) + + def test_dhcp_probe_service(self): + options = Options() + options["config-file"] = self.write_config({}) + service_maker = ProvisioningServiceMaker("Spike", "Milligan") + service = service_maker.makeService(options) + dhcp_probe = service.getServiceNamed("dhcp_probe") + self.assertIsInstance(dhcp_probe, DHCPProbeService) def test_tftp_service(self): # A TFTP service is configured and added to the top-level service. 
@@ -132,7 +156,7 @@ "tftp": { "generator": "http://candlemass/solitude", "resource_root": self.tempdir, - "port": factory.getRandomPort(), + "port": factory.pick_port(), }, } options = Options() @@ -161,7 +185,7 @@ class TestSingleUsernamePasswordChecker(MAASTestCase): """Tests for `SingleUsernamePasswordChecker`.""" - run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=5) + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) @inlineCallbacks def test_requestAvatarId_okay(self): diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_plugin_services.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_plugin_services.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_plugin_services.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_plugin_services.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,87 @@ +# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for extra services in `provisioningserver.plugin`.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import signal +import sys + +from maastesting.testcase import ( + MAASTestCase, + MAASTwistedRunTest, + ) +from provisioningserver.plugin import LogService +from testtools.content import content_from_file +from twisted.application.service import MultiService +from twisted.python.log import ( + FileLogObserver, + theLogPublisher, + ) +from twisted.python.logfile import LogFile + + +class TestServicesBase: + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def setUp(self): + super(TestServicesBase, self).setUp() + self.observers = theLogPublisher.observers[:] + self.services = MultiService() + self.services.privilegedStartService() + self.services.startService() + + def tearDown(self): + super(TestServicesBase, self).tearDown() + d = self.services.stopService() + # The log file must be read in right after services have stopped, + # before the temporary directory where the log lives is removed. + d.addBoth(lambda ignore: self.addDetailFromLog()) + d.addBoth(lambda ignore: self.assertNoObserversLeftBehind()) + return d + + def addDetailFromLog(self): + content = content_from_file(self.log_filename, buffer_now=True) + self.addDetail("log", content) + + def assertNoObserversLeftBehind(self): + self.assertEqual(self.observers, theLogPublisher.observers) + + +class TestLogService(TestServicesBase, MAASTestCase): + """Tests for `provisioningserver.services.LogService`.""" + + def test_log_to_stdout(self): + log_service = LogService("-") + log_service.setServiceParent(self.services) + self.assertIsInstance(log_service.observer, FileLogObserver) + self.assertEqual("-", log_service.filename) + self.assertEqual(sys.stdout, log_service.logfile) + # The SIGUSR1 signal handler is untouched. 
+ self.assertEqual( + signal.getsignal(signal.SIGUSR1), + signal.SIG_DFL) + + def test_log_to_file(self): + log_filename = self.make_file(name="test.log") + log_service = LogService(log_filename) + log_service.setServiceParent(self.services) + self.assertIsInstance(log_service.observer, FileLogObserver) + self.assertEqual(log_filename, log_service.filename) + self.assertIsInstance(log_service.logfile, LogFile) + self.assertEqual(log_filename, log_service.logfile.path) + # The SIGUSR1 signal handler is set. + self.assertEqual( + signal.getsignal(signal.SIGUSR1), + log_service._signal_handler) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_security.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_security.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_security.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_security.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,291 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for MAAS's cluster security module.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from os import ( + chmod, + stat, + ) +from os.path import dirname + +from fixtures import EnvironmentVariableFixture +import lockfile +from maastesting.factory import factory +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import MAASTestCase +from mock import ( + ANY, + sentinel, + ) +from provisioningserver import security +from provisioningserver.utils.fs import ( + ensure_dir, + read_text_file, + write_text_file, + ) + + +class TestGetSharedSecretFromFilesystem(MAASTestCase): + + def setUp(self): + super(TestGetSharedSecretFromFilesystem, self).setUp() + self.useFixture(EnvironmentVariableFixture( + "MAAS_ROOT", self.make_dir())) + + def write_secret(self): + secret = factory.make_bytes() + secret_path = security.get_shared_secret_filesystem_path() + ensure_dir(dirname(secret_path)) + write_text_file(secret_path, security.to_hex(secret)) + return secret + + def test__returns_None_when_no_secret_exists(self): + self.assertIsNone(security.get_shared_secret_from_filesystem()) + + def test__returns_secret_when_one_exists(self): + secret = self.write_secret() + self.assertEqual( + secret, security.get_shared_secret_from_filesystem()) + + def test__same_secret_is_returned_on_subsequent_calls(self): + self.write_secret() + self.assertEqual( + security.get_shared_secret_from_filesystem(), + security.get_shared_secret_from_filesystem()) + + def test__errors_reading_file_are_raised(self): + self.write_secret() + secret_path = security.get_shared_secret_filesystem_path() + self.addCleanup(chmod, secret_path, 0o600) + chmod(secret_path, 0o000) + self.assertRaises(IOError, security.get_shared_secret_from_filesystem) + + def test__errors_when_filesystem_value_cannot_be_decoded(self): + self.write_secret() + 
write_text_file(security.get_shared_secret_filesystem_path(), "_") + self.assertRaises( + TypeError, security.get_shared_secret_from_filesystem) + + def test__deals_fine_with_whitespace_in_filesystem_value(self): + secret = self.write_secret() + write_text_file( + security.get_shared_secret_filesystem_path(), + " %s\n" % security.to_hex(secret)) + self.assertEqual(secret, security.get_shared_secret_from_filesystem()) + + def test__reads_with_lock(self): + lock = lockfile.FileLock(security.get_shared_secret_filesystem_path()) + self.assertFalse(lock.is_locked()) + + def check_lock(path): + self.assertTrue(lock.is_locked()) + return "12" # Two arbitrary hex characters. + + read_text_file = self.patch_autospec(security, "read_text_file") + read_text_file.side_effect = check_lock + security.get_shared_secret_from_filesystem() + self.assertThat(read_text_file, MockCalledOnceWith(ANY)) + self.assertFalse(lock.is_locked()) + + +class TestSetSharedSecretOnFilesystem(MAASTestCase): + + def setUp(self): + super(TestSetSharedSecretOnFilesystem, self).setUp() + self.useFixture(EnvironmentVariableFixture( + "MAAS_ROOT", self.make_dir())) + + def read_secret(self): + secret_path = security.get_shared_secret_filesystem_path() + secret_hex = read_text_file(secret_path) + return security.to_bin(secret_hex) + + def test__writes_secret(self): + secret = factory.make_bytes() + security.set_shared_secret_on_filesystem(secret) + self.assertEqual(secret, self.read_secret()) + + def test__writes_with_lock(self): + lock = lockfile.FileLock(security.get_shared_secret_filesystem_path()) + self.assertFalse(lock.is_locked()) + + def check_lock(path, data): + self.assertTrue(lock.is_locked()) + + write_text_file = self.patch_autospec(security, "write_text_file") + write_text_file.side_effect = check_lock + security.set_shared_secret_on_filesystem(b"foo") + self.assertThat(write_text_file, MockCalledOnceWith(ANY, ANY)) + self.assertFalse(lock.is_locked()) + + def 
test__writes_with_secure_permissions(self): + secret = factory.make_bytes() + security.set_shared_secret_on_filesystem(secret) + secret_path = security.get_shared_secret_filesystem_path() + perms_observed = stat(secret_path).st_mode & 0o777 + perms_expected = 0o640 + self.assertEqual( + perms_expected, perms_observed, + "Expected %04o, got %04o." % (perms_expected, perms_observed)) + + +class TestInstallSharedSecretScript(MAASTestCase): + + def setUp(self): + super(TestInstallSharedSecretScript, self).setUp() + self.useFixture(EnvironmentVariableFixture( + "MAAS_ROOT", self.make_dir())) + + def test__has_add_arguments(self): + # It doesn't do anything, but it's there to fulfil the contract with + # ActionScript/MainScript. + security.InstallSharedSecretScript.add_arguments(sentinel.parser) + self.assertIsNotNone("Obligatory assertion.") + + def installAndCheckExitCode(self, code): + error = self.assertRaises( + SystemExit, security.InstallSharedSecretScript.run, sentinel.args) + self.assertEqual(code, error.code) + + def test__reads_secret_from_stdin(self): + secret = factory.make_bytes() + + stdin = self.patch_autospec(security, "stdin") + stdin.readline.return_value = secret.encode("hex") + stdin.isatty.return_value = False + + self.installAndCheckExitCode(0) + self.assertEqual( + secret, security.get_shared_secret_from_filesystem()) + + def test__ignores_surrounding_whitespace_from_stdin(self): + secret = factory.make_bytes() + + stdin = self.patch_autospec(security, "stdin") + stdin.readline.return_value = " " + secret.encode("hex") + " \n" + stdin.isatty.return_value = False + + self.installAndCheckExitCode(0) + self.assertEqual( + secret, security.get_shared_secret_from_filesystem()) + + def test__reads_secret_from_tty(self): + secret = factory.make_bytes() + + stdin = self.patch_autospec(security, "stdin") + stdin.isatty.return_value = True + + raw_input = self.patch(security, "raw_input") + raw_input.return_value = secret.encode("hex") + + 
self.installAndCheckExitCode(0) + self.assertThat( + raw_input, MockCalledOnceWith("Secret (hex/base16 encoded): ")) + self.assertEqual( + secret, security.get_shared_secret_from_filesystem()) + + def test__ignores_surrounding_whitespace_from_tty(self): + secret = factory.make_bytes() + + stdin = self.patch_autospec(security, "stdin") + stdin.isatty.return_value = True + + raw_input = self.patch(security, "raw_input") + raw_input.return_value = " " + secret.encode("hex") + " \n" + + self.installAndCheckExitCode(0) + self.assertEqual( + secret, security.get_shared_secret_from_filesystem()) + + def test__deals_gracefully_with_eof_from_tty(self): + stdin = self.patch_autospec(security, "stdin") + stdin.isatty.return_value = True + + raw_input = self.patch(security, "raw_input") + raw_input.side_effect = EOFError() + + self.installAndCheckExitCode(1) + self.assertIsNone( + security.get_shared_secret_from_filesystem()) + + def test__deals_gracefully_with_interrupt_from_tty(self): + stdin = self.patch_autospec(security, "stdin") + stdin.isatty.return_value = True + + raw_input = self.patch(security, "raw_input") + raw_input.side_effect = KeyboardInterrupt() + + self.assertRaises( + KeyboardInterrupt, + security.InstallSharedSecretScript.run, sentinel.args) + self.assertIsNone( + security.get_shared_secret_from_filesystem()) + + def test__prints_error_message_when_secret_cannot_be_decoded(self): + stdin = self.patch_autospec(security, "stdin") + stdin.readline.return_value = "garbage" + stdin.isatty.return_value = False + + print = self.patch(security, "print") + + self.installAndCheckExitCode(1) + self.assertThat( + print, MockCalledOnceWith( + "Secret could not be decoded:", "Odd-length string", + file=security.stderr)) + + def test__prints_message_when_secret_is_installed(self): + stdin = self.patch_autospec(security, "stdin") + stdin.readline.return_value = factory.make_bytes().encode("hex") + stdin.isatty.return_value = False + + print = self.patch(security, "print") 
+ + self.installAndCheckExitCode(0) + shared_secret_path = security.get_shared_secret_filesystem_path() + self.assertThat( + print, MockCalledOnceWith( + "Secret installed to %s." % shared_secret_path)) + + +class TestCheckForSharedSecretScript(MAASTestCase): + + def setUp(self): + super(TestCheckForSharedSecretScript, self).setUp() + self.useFixture(EnvironmentVariableFixture( + "MAAS_ROOT", self.make_dir())) + + def test__has_add_arguments(self): + # It doesn't do anything, but it's there to fulfil the contract with + # ActionScript/MainScript. + security.CheckForSharedSecretScript.add_arguments(sentinel.parser) + self.assertIsNotNone("Obligatory assertion.") + + def test__exits_non_zero_if_secret_does_not_exist(self): + print = self.patch(security, "print") + error = self.assertRaises( + SystemExit, security.CheckForSharedSecretScript.run, sentinel.args) + self.assertEqual(1, error.code) + self.assertThat( + print, MockCalledOnceWith("Shared-secret is NOT installed.")) + + def test__exits_zero_if_secret_exists(self): + security.set_shared_secret_on_filesystem(factory.make_bytes()) + print = self.patch(security, "print") + error = self.assertRaises( + SystemExit, security.CheckForSharedSecretScript.run, sentinel.args) + self.assertEqual(0, error.code) + self.assertThat( + print, MockCalledOnceWith("Shared-secret is installed.")) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_services.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_services.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_services.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_services.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,121 +0,0 @@ -# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for `provisioningserver.services`.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -import os -import signal -import sys - -from maastesting.factory import factory -from maastesting.testcase import MAASTestCase -from oops_twisted import OOPSObserver -from provisioningserver.services import ( - LogService, - OOPSService, - ) -from testtools.content import content_from_file -from testtools.deferredruntest import AsynchronousDeferredRunTest -from twisted.application.service import MultiService -from twisted.python.log import ( - FileLogObserver, - theLogPublisher, - ) -from twisted.python.logfile import LogFile - - -class TestServicesBase: - - run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=5) - - def setUp(self): - super(TestServicesBase, self).setUp() - self.observers = theLogPublisher.observers[:] - self.services = MultiService() - self.services.privilegedStartService() - self.services.startService() - - def tearDown(self): - super(TestServicesBase, self).tearDown() - d = self.services.stopService() - # The log file must be read in right after services have stopped, - # before the temporary directory where the log lives is removed. 
- d.addBoth(lambda ignore: self.addDetailFromLog()) - d.addBoth(lambda ignore: self.assertNoObserversLeftBehind()) - return d - - def addDetailFromLog(self): - content = content_from_file(self.log_filename, buffer_now=True) - self.addDetail("log", content) - - def assertNoObserversLeftBehind(self): - self.assertEqual(self.observers, theLogPublisher.observers) - - -class TestLogService(TestServicesBase, MAASTestCase): - """Tests for `provisioningserver.services.LogService`.""" - - def test_log_to_stdout(self): - log_service = LogService("-") - log_service.setServiceParent(self.services) - self.assertIsInstance(log_service.observer, FileLogObserver) - self.assertEqual("-", log_service.filename) - self.assertEqual(sys.stdout, log_service.logfile) - # The SIGUSR1 signal handler is untouched. - self.assertEqual( - signal.getsignal(signal.SIGUSR1), - signal.SIG_DFL) - - def test_log_to_file(self): - log_filename = self.make_file(name="test.log") - log_service = LogService(log_filename) - log_service.setServiceParent(self.services) - self.assertIsInstance(log_service.observer, FileLogObserver) - self.assertEqual(log_filename, log_service.filename) - self.assertIsInstance(log_service.logfile, LogFile) - self.assertEqual(log_filename, log_service.logfile.path) - # The SIGUSR1 signal handler is set. - self.assertEqual( - signal.getsignal(signal.SIGUSR1), - log_service._signal_handler) - - -class TestOOPSService(TestServicesBase, MAASTestCase): - """Tests for `provisioningserver.services.OOPSService`.""" - - def setUp(self): - super(TestOOPSService, self).setUp() - # OOPSService relies upon LogService. 
- self.tempdir = self.make_dir() - self.log_filename = factory.make_file( - location=self.tempdir, name="test.log") - self.log_service = LogService(self.log_filename) - self.log_service.setServiceParent(self.services) - - def test_minimal(self): - oops_service = OOPSService(self.log_service, None, None) - oops_service.setServiceParent(self.services) - observer = oops_service.observer - self.assertIsInstance(observer, OOPSObserver) - self.assertEqual([], observer.config.publishers) - self.assertEqual({}, observer.config.template) - - def test_with_all_params(self): - oops_dir = os.path.join(self.tempdir, "oops") - oops_service = OOPSService(self.log_service, oops_dir, "Sidebottom") - oops_service.setServiceParent(self.services) - observer = oops_service.observer - self.assertIsInstance(observer, OOPSObserver) - self.assertEqual(1, len(observer.config.publishers)) - self.assertEqual({"reporter": "Sidebottom"}, observer.config.template) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_start_cluster_controller.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_start_cluster_controller.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_start_cluster_controller.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_start_cluster_controller.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,305 +0,0 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for the `start_cluster_controller` command.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from argparse import ArgumentParser -from collections import namedtuple -import httplib -from io import BytesIO -import json -import os -from urllib2 import ( - HTTPError, - URLError, - ) - -from apiclient.maas_client import MAASDispatcher -from apiclient.testing.django import parse_headers_and_body_with_django -from fixtures import ( - EnvironmentVariableFixture, - FakeLogger, - ) -from maastesting.factory import factory -from mock import ( - ANY, - call, - sentinel, - ) -from provisioningserver import start_cluster_controller -from provisioningserver.testing.testcase import PservTestCase -from testtools.matchers import StartsWith - -# Some tests in this file have to import methods from Django. This causes -# Django to parse its settings file and, in Django 1.5+, assert that it -# contains a value for the setting 'SECRET_KEY'. -# The trick we use here is to use this very module as Django's settings -# module and define a value for 'SECRET_KEY'. -SECRET_KEY = 'bogus secret key' - - -class Sleeping(Exception): - """Exception: `sleep` has been called.""" - - -class Executing(Exception): - """Exception: an attempt has been made to start another process. - - It would be inadvisable for tests in this test case to attempt to start - a real celeryd, so we want to know when it tries. 
- """ - - -def make_url(name_hint='host'): - return "http://%s.example.com/%s/" % ( - factory.make_name(name_hint), - factory.make_name('path'), - ) - - -FakeArgs = namedtuple('FakeArgs', ['server_url', 'user', 'group']) - - -def make_args(server_url=None): - if server_url is None: - server_url = make_url('region') - user = factory.make_name('user') - group = factory.make_name('group') - return FakeArgs(server_url, user, group) - - -class FakeURLOpenResponse: - """Cheap simile of a `urlopen` result.""" - - def __init__(self, content, status=httplib.OK): - self._content = content - self._status_code = status - - def read(self): - return self._content - - def getcode(self): - return self._status_code - - -class TestStartClusterController(PservTestCase): - - def setUp(self): - super(TestStartClusterController, self).setUp() - - self.useFixture(FakeLogger()) - self.patch(start_cluster_controller, 'set_up_logging') - - # Patch out anything that could be remotely harmful if we did it - # accidentally in the test. Make the really outrageous ones - # raise exceptions. - self.patch(start_cluster_controller, 'sleep').side_effect = Sleeping() - self.patch(start_cluster_controller, 'getpwnam') - self.patch(start_cluster_controller, 'getgrnam') - self.patch(os, 'setuid') - self.patch(os, 'setgid') - self.patch(os, 'execvpe').side_effect = Executing() - get_uuid = self.patch(start_cluster_controller, 'get_cluster_uuid') - get_uuid.return_value = factory.getRandomUUID() - - def make_connection_details(self): - return { - 'BROKER_URL': make_url('broker'), - } - - def parse_headers_and_body(self, headers, body): - """Parse ingredients of a web request. - - The headers and body are as passed to :class:`MAASDispatcher`. - """ - # Make Django STFU; just using Django's multipart code causes it to - # pull in a settings module, and it will throw up if it can't. 
- self.useFixture( - EnvironmentVariableFixture( - "DJANGO_SETTINGS_MODULE", __name__)) - - post, files = parse_headers_and_body_with_django(headers, body) - return post, files - - def prepare_response(self, http_code, content=""): - """Prepare to return the given http response from API request.""" - fake = self.patch(MAASDispatcher, 'dispatch_query') - fake.return_value = FakeURLOpenResponse(content, status=http_code) - return fake - - def prepare_success_response(self): - """Prepare to return connection details from API request.""" - details = self.make_connection_details() - self.prepare_response(httplib.OK, json.dumps(details)) - return details - - def prepare_rejection_response(self): - """Prepare to return "rejected" from API request.""" - self.prepare_response(httplib.FORBIDDEN) - - def prepare_pending_response(self): - """Prepare to return "request pending" from API request.""" - self.prepare_response(httplib.ACCEPTED) - - def test_run_command(self): - # We can't really run the script, but we can verify that (with - # the right system functions patched out) we can run it - # directly. 
- start_cluster_controller.sleep.side_effect = None - self.prepare_success_response() - parser = ArgumentParser() - start_cluster_controller.add_arguments(parser) - self.assertRaises( - Executing, - start_cluster_controller.run, - parser.parse_args((make_url(),))) - self.assertEqual(1, os.execvpe.call_count) - - def test_uses_given_url(self): - url = make_url('region') - self.patch(start_cluster_controller, 'start_up') - self.prepare_success_response() - start_cluster_controller.run(make_args(server_url=url)) - (args, kwargs) = MAASDispatcher.dispatch_query.call_args - self.assertThat(args[0], StartsWith(url + 'api/1.0/nodegroups/')) - - def test_fails_if_declined(self): - self.patch(start_cluster_controller, 'start_up') - self.prepare_rejection_response() - self.assertRaises( - start_cluster_controller.ClusterControllerRejected, - start_cluster_controller.run, make_args()) - self.assertItemsEqual([], start_cluster_controller.start_up.calls_list) - - def test_polls_while_pending(self): - self.patch(start_cluster_controller, 'start_up') - self.prepare_pending_response() - self.assertRaises( - Sleeping, - start_cluster_controller.run, make_args()) - self.assertItemsEqual([], start_cluster_controller.start_up.calls_list) - - def test_polls_on_unexpected_errors(self): - self.patch(start_cluster_controller, 'start_up') - self.patch(MAASDispatcher, 'dispatch_query').side_effect = HTTPError( - make_url(), httplib.REQUEST_TIMEOUT, "Timeout.", '', BytesIO()) - self.assertRaises( - Sleeping, - start_cluster_controller.run, make_args()) - self.assertItemsEqual([], start_cluster_controller.start_up.calls_list) - - def test_register_passes_cluster_information(self): - self.prepare_success_response() - interface = { - 'interface': factory.make_name('eth'), - 'ip': factory.getRandomIPAddress(), - 'subnet_mask': '255.255.255.0', - } - discover = self.patch(start_cluster_controller, 'discover_networks') - discover.return_value = [interface] - - 
start_cluster_controller.register(make_url()) - - (args, kwargs) = MAASDispatcher.dispatch_query.call_args - headers, body = kwargs["headers"], kwargs["data"] - post, files = self.parse_headers_and_body(headers, body) - self.assertEqual([interface], json.loads(post['interfaces'])) - self.assertEqual( - start_cluster_controller.get_cluster_uuid.return_value, - post['uuid']) - - def test_starts_up_once_accepted(self): - self.patch(start_cluster_controller, 'start_up') - connection_details = self.prepare_success_response() - server_url = make_url() - start_cluster_controller.run(make_args(server_url=server_url)) - self.assertItemsEqual( - start_cluster_controller.start_up.call_args[0], - (server_url, connection_details)) - - def test_start_up_calls_refresh_secrets(self): - start_cluster_controller.sleep.side_effect = None - url = make_url('region') - connection_details = self.make_connection_details() - self.prepare_success_response() - - self.assertRaises( - Executing, - start_cluster_controller.start_up, - url, connection_details, - factory.make_name('user'), factory.make_name('group')) - - (args, kwargs) = MAASDispatcher.dispatch_query.call_args - self.assertEqual( - url + 'api/1.0/nodegroups/?op=refresh_workers', args[0]) - self.assertEqual('POST', kwargs['method']) - - headers, body = kwargs["headers"], kwargs["data"] - post, files = self.parse_headers_and_body(headers, body) - - def test_start_up_ignores_failure_on_refresh_secrets(self): - start_cluster_controller.sleep.side_effect = None - self.patch(MAASDispatcher, 'dispatch_query').side_effect = URLError( - "Simulated HTTP failure.") - - self.assertRaises( - Executing, - start_cluster_controller.start_up, - make_url(), self.make_connection_details(), - factory.make_name('user'), factory.make_name('group')) - - self.assertEqual(1, os.execvpe.call_count) - - def test_start_celery_sets_gid_before_uid(self): - # The gid should be changed before the uid; it may not be possible to - # change the gid once privileges 
are dropped. - start_cluster_controller.getpwnam.return_value.pw_uid = sentinel.uid - start_cluster_controller.getgrnam.return_value.gr_gid = sentinel.gid - # Patch setuid and setgid, using the same mock for both, so that we - # can observe call ordering. - setuidgid = self.patch(os, "setuid") - self.patch(os, "setgid", setuidgid) - self.assertRaises( - Executing, start_cluster_controller.start_celery, - make_url(), self.make_connection_details(), sentinel.user, - sentinel.group) - # getpwname and getgrnam are used to query the passwd and group - # databases respectively. - self.assertEqual( - [call(sentinel.user)], - start_cluster_controller.getpwnam.call_args_list) - self.assertEqual( - [call(sentinel.group)], - start_cluster_controller.getgrnam.call_args_list) - # The arguments to the mocked setuid/setgid calls demonstrate that the - # gid was selected first. - self.assertEqual( - [call(sentinel.gid), call(sentinel.uid)], - setuidgid.call_args_list) - - def test_start_celery_passes_environment(self): - server_url = make_url() - connection_details = self.make_connection_details() - self.assertRaises( - Executing, - start_cluster_controller.start_celery, - server_url, connection_details, factory.make_name('user'), - factory.make_name('group')) - - env = dict( - os.environ, - CELERY_BROKER_URL=connection_details['BROKER_URL'], - MAAS_URL=server_url, - ) - os.execvpe.assert_called_once_with(ANY, ANY, env=env) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_tags.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_tags.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_tags.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_tags.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2012 Canonical Ltd. This software is licensed under the +# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for tag updating.""" @@ -16,7 +16,6 @@ import doctest import httplib -import io from itertools import chain import json from textwrap import dedent @@ -38,7 +37,6 @@ sentinel, ) from provisioningserver import tags -from provisioningserver.auth import get_recorded_nodegroup_uuid from provisioningserver.testing.testcase import PservTestCase from testtools.matchers import ( DocTestMatches, @@ -47,20 +45,6 @@ ) -def make_response(status_code, content, content_type=None): - """Return a similar response to that which `urllib2` returns.""" - if content_type is None: - headers_raw = b"" - else: - if isinstance(content_type, unicode): - content_type = content_type.encode("ascii") - headers_raw = b"Content-Type: %s" % content_type - headers = httplib.HTTPMessage(io.BytesIO(headers_raw)) - return urllib2.addinfourl( - fp=io.BytesIO(content), headers=headers, - url=None, code=status_code) - - class TestProcessResponse(PservTestCase): def setUp(self): @@ -69,25 +53,29 @@ def test_process_OK_response_with_JSON_content(self): data = {"abc": 123} - response = make_response( + response = factory.make_response( httplib.OK, json.dumps(data), "application/json") self.assertEqual(data, tags.process_response(response)) def test_process_OK_response_with_BSON_content(self): data = {"abc": 123} - response = make_response( + response = factory.make_response( httplib.OK, bson.BSON.encode(data), "application/bson") self.assertEqual(data, tags.process_response(response)) def test_process_OK_response_with_other_content(self): - data = factory.getRandomBytes() - response = make_response( + data = factory.make_bytes() + response = factory.make_response( httplib.OK, data, "application/octet-stream") self.assertEqual(data, tags.process_response(response)) def test_process_not_OK_response(self): - response = make_response(httplib.NOT_FOUND, b"", "application/json") - response.url = factory.getRandomString() + response = factory.make_response( + httplib.NOT_FOUND, + b"", + "application/json" 
+ ) + response.url = factory.make_string() error = self.assertRaises( urllib2.HTTPError, tags.process_response, response) self.assertThat( @@ -248,7 +236,7 @@ xml = self.do_merge_details({ "lshw": b"", "foom": b"well", - "zoom": b"<>" + factory.getRandomBytes(), + "zoom": b"<>" + factory.make_bytes(), "oops": None, }) expected = """\ @@ -351,7 +339,7 @@ xml = self.do_merge_details({ "lshw": b"", "foom": b"well", - "zoom": b"<>" + factory.getRandomBytes(), + "zoom": b"<>" + factory.make_bytes(), "oops": None, }) expected = """\ @@ -469,7 +457,7 @@ tags, "merge_details", lambda mapping: "merged:" + "+".join(mapping)) - def test(self): + def test__generates_node_details(self): batches = [["s1", "s2"], ["s3"]] responses = [ {"s1": {"foo": "s1"}, @@ -495,29 +483,6 @@ super(TestTagUpdating, self).setUp() self.useFixture(FakeLogger()) - def test_get_cached_knowledge_knows_nothing(self): - # If we haven't given it any secrets, we should get back nothing - self.assertEqual((None, None), tags.get_cached_knowledge()) - - def test_get_cached_knowledge_with_only_url(self): - self.set_maas_url() - self.assertEqual((None, None), tags.get_cached_knowledge()) - - def test_get_cached_knowledge_with_only_url_creds(self): - self.set_maas_url() - self.set_api_credentials() - self.assertEqual((None, None), tags.get_cached_knowledge()) - - def test_get_cached_knowledge_with_all_info(self): - self.set_maas_url() - self.set_api_credentials() - self.set_node_group_uuid() - client, uuid = tags.get_cached_knowledge() - self.assertIsNot(None, client) - self.assertIsInstance(client, MAASClient) - self.assertIsNot(None, uuid) - self.assertEqual(get_recorded_nodegroup_uuid(), uuid) - def fake_client(self): return MAASClient(None, None, self.make_maas_url()) @@ -527,7 +492,7 @@ def test_get_nodes_calls_correct_api_and_parses_result(self): client, uuid = self.fake_cached_knowledge() - response = make_response( + response = factory.make_response( httplib.OK, b'["system-id1", "system-id2"]', 
'application/json', @@ -552,7 +517,11 @@ }, } content = bson.BSON.encode(data) - response = make_response(httplib.OK, content, 'application/bson') + response = factory.make_response( + httplib.OK, + content, + 'application/bson' + ) post = self.patch(client, 'post') post.return_value = response result = tags.get_details_for_nodes( @@ -565,7 +534,11 @@ def test_post_updated_nodes_calls_correct_api_and_parses_result(self): client, uuid = self.fake_cached_knowledge() content = b'{"added": 1, "removed": 2}' - response = make_response(httplib.OK, content, 'application/json') + response = factory.make_response( + httplib.OK, + content, + 'application/json' + ) post_mock = MagicMock(return_value=response) self.patch(client, 'post', post_mock) name = factory.make_name('tag') @@ -618,26 +591,16 @@ (['a', 'c'], ['b']), tags.classify(xpath, node_details)) - def test_process_node_tags_no_secrets(self): - self.patch(MAASClient, 'get') - self.patch(MAASClient, 'post') - tag_name = factory.make_name('tag') - self.assertRaises( - tags.MissingCredentials, - tags.process_node_tags, tag_name, '//node', None) - self.assertFalse(MAASClient.get.called) - self.assertFalse(MAASClient.post.called) - def test_process_node_tags_integration(self): - self.set_secrets() + self.set_maas_url() get_nodes = FakeMethod( - result=make_response( + result=factory.make_response( httplib.OK, b'["system-id1", "system-id2"]', 'application/json', )) post_hw_details = FakeMethod( - result=make_response( + result=factory.make_response( httplib.OK, bson.BSON.encode({ 'system-id1': {'lshw': b''}, @@ -647,7 +610,7 @@ )) get_fake = MultiFakeMethod([get_nodes]) post_update_fake = FakeMethod( - result=make_response( + result=factory.make_response( httplib.OK, b'{"added": 1, "removed": 1}', 'application/json', @@ -656,10 +619,12 @@ self.patch(MAASClient, 'get', get_fake) self.patch(MAASClient, 'post', post_fake) tag_name = factory.make_name('tag') - nodegroup_uuid = get_recorded_nodegroup_uuid() + nodegroup_uuid = 
factory.make_name("nodegroup-uuid") tag_definition = '//lshw:node' tag_nsmap = {"lshw": "lshw"} - tags.process_node_tags(tag_name, tag_definition, tag_nsmap=tag_nsmap) + tags.process_node_tags( + tag_name, tag_definition, tag_nsmap, + self.fake_client(), nodegroup_uuid) nodegroup_url = '/api/1.0/nodegroups/%s/' % (nodegroup_uuid,) tag_url = '/api/1.0/tags/%s/' % (tag_name,) self.assertEqual( @@ -690,9 +655,6 @@ client = object() uuid = factory.make_name('nodegroupuuid') self.patch( - tags, 'get_cached_knowledge', - MagicMock(return_value=(client, uuid))) - self.patch( tags, 'get_nodes_for_node_group', MagicMock(return_value=['a', 'b', 'c'])) fake_first = FakeMethod(result={ @@ -709,8 +671,8 @@ tag_name = factory.make_name('tag') tag_definition = '//node' tags.process_node_tags( - tag_name, tag_definition, tag_nsmap=None, batch_size=2) - tags.get_cached_knowledge.assert_called_once_with() + tag_name, tag_definition, tag_nsmap=None, client=client, + nodegroup_uuid=uuid, batch_size=2) tags.get_nodes_for_node_group.assert_called_once_with(client, uuid) self.assertEqual([((client, uuid, ['a', 'c']), {})], fake_first.calls) self.assertEqual([((client, uuid, ['b']), {})], fake_second.calls) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_tasks.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_tasks.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_tasks.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_tasks.py 2015-07-10 01:27:14.000000000 +0000 @@ -14,332 +14,44 @@ __metaclass__ = type __all__ = [] -from datetime import datetime -import json import os import random -from subprocess import ( - CalledProcessError, - PIPE, - ) +from subprocess import CalledProcessError -from apiclient.creds import convert_tuple_to_string -from apiclient.maas_client import MAASClient -from apiclient.testing.credentials import make_api_credentials import celery from celery import states -from celery.app import 
app_or_default -from celery.task import Task from maastesting.celery import CeleryFixture from maastesting.factory import factory from maastesting.fakemethod import ( FakeMethod, MultiFakeMethod, ) -from maastesting.matchers import ( - MockAnyCall, - MockCalledOnceWith, - ) -from mock import ( - ANY, - call, - Mock, - sentinel, - ) from netaddr import IPNetwork -from provisioningserver import ( - auth, - boot_images, - cache, - tags, - tasks, - utils, - ) -from provisioningserver.boot import tftppath -from provisioningserver.dhcp import ( - config, - leases, - ) +from provisioningserver import tasks from provisioningserver.dns.config import ( - conf, - DNSForwardZoneConfig, - DNSReverseZoneConfig, MAAS_NAMED_CONF_NAME, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME, - MAAS_NAMED_RNDC_CONF_NAME, - MAAS_RNDC_CONF_NAME, ) -from provisioningserver.power.poweraction import PowerActionFail -from provisioningserver.tags import MissingCredentials +from provisioningserver.dns.testing import patch_dns_config_path +from provisioningserver.dns.zoneconfig import ( + DNSForwardZoneConfig, + DNSReverseZoneConfig, + ) from provisioningserver.tasks import ( - add_new_dhcp_host_map, - enlist_nodes_from_mscm, - enlist_nodes_from_ucsm, - import_boot_images, - Omshell, - power_off, - power_on, - refresh_secrets, - remove_dhcp_host_map, - report_boot_images, - restart_dhcp_server, rndc_command, RNDC_COMMAND_MAX_RETRY, - setup_rndc_configuration, - stop_dhcp_server, - update_node_tags, - UPDATE_NODE_TAGS_MAX_RETRY, - write_dhcp_config, write_dns_config, write_dns_zone_config, write_full_dns_config, ) -from provisioningserver.testing.boot_images import make_boot_image_params -from provisioningserver.testing.config import set_tftp_root from provisioningserver.testing.testcase import PservTestCase -from testresources import FixtureResource +from provisioningserver.utils.shell import ExternalProcessError from testtools.matchers import ( - ContainsAll, Equals, FileExists, MatchesListwise, ) -# An 
arbitrary MAC address. Not using a properly random one here since -# we might accidentally affect real machines on the network. -arbitrary_mac = "AA:BB:CC:DD:EE:FF" - - -celery_config = app_or_default().conf - - -class TestRefreshSecrets(PservTestCase): - """Tests for the `refresh_secrets` task.""" - - resources = ( - ("celery", FixtureResource(CeleryFixture())), - ) - - def test_does_not_require_arguments(self): - refresh_secrets() - # Nothing is refreshed, but there is no error either. - pass - - def test_breaks_on_unknown_item(self): - self.assertRaises(AssertionError, refresh_secrets, not_an_item=None) - - def test_works_as_a_task(self): - self.assertTrue(refresh_secrets.delay().successful()) - - def test_updates_api_credentials(self): - credentials = make_api_credentials() - refresh_secrets( - api_credentials=convert_tuple_to_string(credentials)) - self.assertEqual(credentials, auth.get_recorded_api_credentials()) - - def test_updates_nodegroup_uuid(self): - nodegroup_uuid = factory.make_name('nodegroupuuid') - refresh_secrets(nodegroup_uuid=nodegroup_uuid) - self.assertEqual(nodegroup_uuid, cache.cache.get('nodegroup_uuid')) - - -class TestPowerTasks(PservTestCase): - - resources = ( - ("celery", FixtureResource(CeleryFixture())), - ) - - def test_ether_wake_power_on_with_not_enough_template_args(self): - # In eager test mode the assertion is raised immediately rather - # than being stored in the AsyncResult, so we need to test for - # that instead of using result.get(). 
- self.assertRaises( - PowerActionFail, power_on.delay, "ether_wake") - - def test_ether_wake_power_on(self): - result = power_on.delay( - "ether_wake", mac_address=arbitrary_mac) - self.assertTrue(result.successful()) - - def test_ether_wake_does_not_support_power_off(self): - self.assertRaises( - PowerActionFail, power_off.delay, - "ether_wake", mac=arbitrary_mac) - - -class TestPowerTasksResolveMACAddresses(PservTestCase): - - def test_ip_address_is_looked_up_from_mac_address(self): - # Patch out PowerAction; we're just trying to demonstrate that - # it's invoked with an IP address even if one is not supplied. - PowerAction = self.patch(tasks, "PowerAction") - # find_ip_via_arp() is tested elsewhere; we just want to know - # that it has been used. - self.patch(tasks, "find_ip_via_arp").return_value = sentinel.ip_address - # PowerAction.execute() is passed an ip_address argument in - # addition to the mac_address argument when the latter is - # supplied. - tasks.issue_power_action( - sentinel.power_type, "on", mac_address=sentinel.mac_address) - self.assertThat(PowerAction, MockCalledOnceWith(sentinel.power_type)) - self.assertThat(PowerAction.return_value.execute, MockCalledOnceWith( - power_change="on", mac_address=sentinel.mac_address, - ip_address=sentinel.ip_address)) - - def test_ip_address_is_looked_up_when_already_supplied(self): - # Patch out PowerAction; we're just trying to demonstrate that - # it's invoked with an IP address even if one is not supplied. - PowerAction = self.patch(tasks, "PowerAction") - # find_ip_via_arp() is tested elsewhere; we just want to know - # that it has been used. - self.patch(tasks, "find_ip_via_arp").return_value = sentinel.ip_address - # The ip_address argument passed to PowerAction.execute() is - # looked-up via find_ip_via_arp() even if an ip_address is - # passed into issue_power_action(). 
- tasks.issue_power_action( - sentinel.power_type, "on", mac_address=sentinel.mac_address, - ip_address=sentinel.another_ip_address) - self.assertThat(PowerAction.return_value.execute, MockCalledOnceWith( - power_change="on", mac_address=sentinel.mac_address, - ip_address=sentinel.ip_address)) - - -class TestDHCPTasks(PservTestCase): - - resources = ( - ("celery", FixtureResource(CeleryFixture())), - ) - - def assertRecordedStdin(self, recorder, *args): - # Helper to check that the function recorder "recorder" has all - # of the items mentioned in "args" which are extracted from - # stdin. We can just check that all the parameters that were - # passed are being used. - self.assertThat( - recorder.extract_args()[0][0], - ContainsAll(args)) - - def make_dhcp_config_params(self): - """Fake up a dict of dhcp configuration parameters.""" - param_names = [ - 'interface', - 'subnet', - 'subnet_mask', - 'broadcast_ip', - 'dns_servers', - 'domain_name', - 'router_ip', - 'ip_range_low', - 'ip_range_high', - ] - return { - 'dhcp_subnets': [ - {param: factory.getRandomString() for param in param_names} - ], - 'omapi_key': factory.getRandomString(), - } - - def test_upload_dhcp_leases(self): - self.patch( - leases, 'parse_leases_file', - Mock(return_value=(datetime.utcnow(), {}))) - self.patch(leases, 'process_leases', Mock()) - tasks.upload_dhcp_leases.delay() - self.assertEqual(1, leases.process_leases.call_count) - - def test_add_new_dhcp_host_map(self): - # We don't want to actually run omshell in the task, so we stub - # out the wrapper class's _run method and record what it would - # do. 
- mac = factory.getRandomMACAddress() - ip = factory.getRandomIPAddress() - server_address = factory.getRandomString() - key = factory.getRandomString() - recorder = FakeMethod(result=(0, "hardware-type")) - self.patch(Omshell, '_run', recorder) - add_new_dhcp_host_map.delay({ip: mac}, server_address, key) - - self.assertRecordedStdin(recorder, ip, mac, server_address, key) - - def test_add_new_dhcp_host_map_failure(self): - # Check that task failures are caught. Nothing much happens in - # the Task code right now though. - mac = factory.getRandomMACAddress() - ip = factory.getRandomIPAddress() - server_address = factory.getRandomString() - key = factory.getRandomString() - self.patch(Omshell, '_run', FakeMethod(result=(0, "this_will_fail"))) - self.assertRaises( - CalledProcessError, add_new_dhcp_host_map.delay, - {mac: ip}, server_address, key) - - def test_remove_dhcp_host_map(self): - # We don't want to actually run omshell in the task, so we stub - # out the wrapper class's _run method and record what it would - # do. - ip = factory.getRandomIPAddress() - server_address = factory.getRandomString() - key = factory.getRandomString() - recorder = FakeMethod(result=(0, "obj: ")) - self.patch(Omshell, '_run', recorder) - remove_dhcp_host_map.delay(ip, server_address, key) - - self.assertRecordedStdin(recorder, ip, server_address, key) - - def test_remove_dhcp_host_map_failure(self): - # Check that task failures are caught. Nothing much happens in - # the Task code right now though. 
- ip = factory.getRandomIPAddress() - server_address = factory.getRandomString() - key = factory.getRandomString() - self.patch(Omshell, '_run', FakeMethod(result=(0, "this_will_fail"))) - self.assertRaises( - CalledProcessError, remove_dhcp_host_map.delay, - ip, server_address, key) - - def test_write_dhcp_config_invokes_script_correctly(self): - mocked_proc = Mock() - mocked_proc.returncode = 0 - mocked_proc.communicate = Mock(return_value=('output', 'error output')) - mocked_popen = self.patch( - utils, "Popen", Mock(return_value=mocked_proc)) - - config_params = self.make_dhcp_config_params() - write_dhcp_config(**config_params) - - # It should construct Popen with the right parameters. - self.assertThat(mocked_popen, MockAnyCall( - ["sudo", "-n", "maas-provision", "atomic-write", "--filename", - celery_config.DHCP_CONFIG_FILE, "--mode", "0644"], stdin=PIPE)) - - # It should then pass the content to communicate(). - content = config.get_config(**config_params).encode("ascii") - self.assertThat(mocked_proc.communicate, MockAnyCall(content)) - - # Similarly, it also writes the DHCPD interfaces to - # /var/lib/maas/dhcpd-interfaces. 
- self.assertThat(mocked_popen, MockAnyCall( - [ - "sudo", "-n", "maas-provision", "atomic-write", "--filename", - celery_config.DHCP_INTERFACES_FILE, "--mode", "0644", - ], - stdin=PIPE)) - - def test_restart_dhcp_server_sends_command(self): - self.patch(tasks, 'call_and_check') - restart_dhcp_server() - self.assertThat(tasks.call_and_check, MockCalledOnceWith( - ['sudo', '-n', 'service', 'maas-dhcp-server', 'restart'])) - - def test_stop_dhcp_server_sends_command_and_writes_empty_config(self): - self.patch(tasks, 'call_and_check') - self.patch(tasks, 'sudo_write_file') - stop_dhcp_server() - self.assertThat(tasks.call_and_check, MockCalledOnceWith( - ['sudo', '-n', 'service', 'maas-dhcp-server', 'stop'])) - self.assertThat(tasks.sudo_write_file, MockCalledOnceWith( - celery_config.DHCP_CONFIG_FILE, tasks.DISABLED_DHCP_SERVER)) - def assertTaskRetried(runner, result, nb_retries, task_name): # In celery version 2.5 (in Saucy) a retried tasks that eventually @@ -365,7 +77,7 @@ # Patch DNS_CONFIG_DIR so that the configuration files will be # written in a temporary directory. self.dns_conf_dir = self.make_dir() - self.patch(conf, 'DNS_CONFIG_DIR', self.dns_conf_dir) + patch_dns_config_path(self, self.dns_conf_dir) # Record the calls to 'execute_rndc_command' (instead of # executing real rndc commands). 
self.rndc_recorder = FakeMethod() @@ -374,7 +86,7 @@ def test_write_dns_config_writes_file(self): zone_names = [random.randint(1, 100), random.randint(1, 100)] - command = factory.getRandomString() + command = factory.make_string() result = write_dns_config.delay( zone_names=zone_names, callback=rndc_command.subtask(args=[command])) @@ -393,19 +105,16 @@ )), result) - def test_write_dns_config_attached_to_dns_worker_queue(self): - self.assertEqual( - write_dns_config.queue, - celery_config.WORKER_QUEUE_DNS) - def test_write_dns_zone_config_writes_file(self): - command = factory.getRandomString() - domain = factory.getRandomString() + command = factory.make_string() + domain = factory.make_string() network = IPNetwork('192.168.0.3/24') - ip = factory.getRandomIPInNetwork(network) + dns_ip = factory.pick_ip_in_network(network) + ip = factory.pick_ip_in_network(network) forward_zone = DNSForwardZoneConfig( domain, serial=random.randint(1, 100), - mapping={factory.getRandomString(): ip}, networks=[network]) + mapping={factory.make_string(): [ip]}, + dns_ip=dns_ip) reverse_zone = DNSReverseZoneConfig( domain, serial=random.randint(1, 100), network=network) result = write_dns_zone_config.delay( @@ -430,40 +139,8 @@ )), result) - def test_write_dns_zone_config_attached_to_dns_worker_queue(self): - self.assertEqual( - write_dns_zone_config.queue, - celery_config.WORKER_QUEUE_DNS) - - def test_setup_rndc_configuration_writes_files(self): - command = factory.getRandomString() - result = setup_rndc_configuration.delay( - callback=rndc_command.subtask(args=[command])) - - self.assertThat( - ( - result.successful(), - os.path.join(self.dns_conf_dir, MAAS_RNDC_CONF_NAME), - os.path.join( - self.dns_conf_dir, MAAS_NAMED_RNDC_CONF_NAME), - self.rndc_recorder.calls, - ), - MatchesListwise( - ( - Equals(True), - FileExists(), - FileExists(), - Equals([((command,), {})]), - )), - result) - - def test_setup_rndc_configuration_attached_to_dns_worker_queue(self): - self.assertEqual( - 
setup_rndc_configuration.queue, - celery_config.WORKER_QUEUE_DNS) - def test_rndc_command_execute_command(self): - command = factory.getRandomString() + command = factory.make_string() result = rndc_command.delay(command) self.assertThat( @@ -484,7 +161,7 @@ [FakeMethod(failure=raised_exception)] * number_of_failures + [FakeMethod()]) self.patch(tasks, 'execute_rndc_command', simulate_failures) - command = factory.getRandomString() + command = factory.make_string() result = rndc_command.delay(command, retry=True) assertTaskRetried( self, result, RNDC_COMMAND_MAX_RETRY + 1, @@ -494,39 +171,38 @@ # If we simulate RNDC_COMMAND_MAX_RETRY + 1 failures, the # task fails. number_of_failures = RNDC_COMMAND_MAX_RETRY + 1 - raised_exception = utils.ExternalProcessError( + raised_exception = ExternalProcessError( random.randint(100, 200), factory.make_name('exception')) simulate_failures = MultiFakeMethod( [FakeMethod(failure=raised_exception)] * number_of_failures + [FakeMethod()]) self.patch(tasks, 'execute_rndc_command', simulate_failures) - command = factory.getRandomString() + command = factory.make_string() self.assertRaises( - utils.ExternalProcessError, rndc_command.delay, + ExternalProcessError, rndc_command.delay, command, retry=True) - def test_rndc_command_attached_to_dns_worker_queue(self): - self.assertEqual(rndc_command.queue, celery_config.WORKER_QUEUE_DNS) - def test_write_full_dns_config_sets_up_config(self): # write_full_dns_config writes the config file, writes # the zone files, and reloads the dns service. 
- domain = factory.getRandomString() + domain = factory.make_string() network = IPNetwork('192.168.0.3/24') - ip = factory.getRandomIPInNetwork(network) + ip = factory.pick_ip_in_network(network) + dns_ip = factory.pick_ip_in_network(network) zones = [ DNSForwardZoneConfig( domain, serial=random.randint(1, 100), - mapping={factory.getRandomString(): ip}, - networks=[network]), + mapping={factory.make_string(): [ip]}, + dns_ip=dns_ip, + ), DNSReverseZoneConfig( domain, serial=random.randint(1, 100), network=network), - ] - command = factory.getRandomString() + ] + command = factory.make_string() result = write_full_dns_config.delay( zones=zones, callback=rndc_command.subtask(args=[command]), - upstream_dns=factory.getRandomIPAddress()) + upstream_dns=factory.make_ipv4_address()) forward_file_name = 'zone.%s' % domain reverse_file_name = 'zone.0.168.192.in-addr.arpa' @@ -549,127 +225,3 @@ FileExists(), FileExists(), ))) - - def test_write_full_dns_attached_to_dns_worker_queue(self): - self.assertEqual( - write_full_dns_config.queue, - celery_config.WORKER_QUEUE_DNS) - - -class TestBootImagesTasks(PservTestCase): - - resources = ( - ("celery", FixtureResource(CeleryFixture())), - ) - - def test_sends_boot_images_to_server(self): - self.useFixture(set_tftp_root(self.make_dir())) - self.set_maas_url() - auth.record_api_credentials(':'.join(make_api_credentials())) - image = make_boot_image_params() - self.patch(tftppath, 'list_boot_images', Mock(return_value=[image])) - self.patch(boot_images, "get_cluster_uuid") - self.patch(MAASClient, 'post') - - report_boot_images.delay() - - args, kwargs = MAASClient.post.call_args - self.assertItemsEqual([image], json.loads(kwargs['images'])) - - -class TestTagTasks(PservTestCase): - - def setUp(self): - super(TestTagTasks, self).setUp() - self.celery = self.useFixture(CeleryFixture()) - - def test_update_node_tags_can_be_retried(self): - self.set_secrets() - # The update_node_tags task can be retried. 
- # Simulate a temporary failure. - number_of_failures = UPDATE_NODE_TAGS_MAX_RETRY - raised_exception = MissingCredentials( - factory.make_name('exception'), random.randint(100, 200)) - simulate_failures = MultiFakeMethod( - [FakeMethod(failure=raised_exception)] * number_of_failures + - [FakeMethod()]) - self.patch(tags, 'process_node_tags', simulate_failures) - tag = factory.getRandomString() - result = update_node_tags.delay( - tag, '//node', tag_nsmap=None, retry=True) - assertTaskRetried( - self, result, UPDATE_NODE_TAGS_MAX_RETRY + 1, - 'provisioningserver.tasks.update_node_tags') - - def test_update_node_tags_is_retried_a_limited_number_of_times(self): - self.set_secrets() - # If we simulate UPDATE_NODE_TAGS_MAX_RETRY + 1 failures, the - # task fails. - number_of_failures = UPDATE_NODE_TAGS_MAX_RETRY + 1 - raised_exception = MissingCredentials( - factory.make_name('exception'), random.randint(100, 200)) - simulate_failures = MultiFakeMethod( - [FakeMethod(failure=raised_exception)] * number_of_failures + - [FakeMethod()]) - self.patch(tags, 'process_node_tags', simulate_failures) - tag = factory.getRandomString() - self.assertRaises( - MissingCredentials, update_node_tags.delay, tag, - '//node', tag_nsmap=None, retry=True) - - -class TestImportPxeFiles(PservTestCase): - - def make_archive_url(self, name=None): - if name is None: - name = factory.make_name('archive') - return 'http://%s.example.com/%s' % (name, factory.make_name('path')) - - def test_import_boot_images(self): - recorder = self.patch(tasks, 'call_and_check') - import_boot_images() - self.assertThat(recorder, MockCalledOnceWith( - ['sudo', '-n', '-E', 'maas-import-pxe-files'], env=ANY)) - self.assertIsInstance(import_boot_images, Task) - - def test_import_boot_images_preserves_environment(self): - recorder = self.patch(tasks, 'call_and_check') - import_boot_images() - self.assertThat(recorder, MockCalledOnceWith( - ['sudo', '-n', '-E', 'maas-import-pxe-files'], env=os.environ)) - - def 
test_import_boot_images_sets_proxy(self): - recorder = self.patch(tasks, 'call_and_check') - proxy = factory.getRandomString() - import_boot_images(http_proxy=proxy) - expected_env = dict(os.environ, http_proxy=proxy, https_proxy=proxy) - self.assertThat(recorder, MockCalledOnceWith( - ['sudo', '-n', '-E', 'maas-import-pxe-files'], env=expected_env)) - - def test_import_boot_images_calls_callback(self): - self.patch(tasks, 'call_and_check') - mock_callback = Mock() - import_boot_images(callback=mock_callback) - self.assertEqual([call()], mock_callback.delay.mock_calls) - - -class TestAddUCSM(PservTestCase): - - def test_enlist_nodes_from_ucsm(self): - url = 'url' - username = 'username' - password = 'password' - mock = self.patch(tasks, 'probe_and_enlist_ucsm') - enlist_nodes_from_ucsm(url, username, password) - self.assertThat(mock, MockCalledOnceWith(url, username, password)) - - -class TestAddMSCM(PservTestCase): - - def test_enlist_nodes_from_mscm(self): - host = 'host' - username = 'username' - password = 'password' - mock = self.patch(tasks, 'probe_and_enlist_mscm') - enlist_nodes_from_mscm(host, username, password) - self.assertThat(mock, MockCalledOnceWith(host, username, password)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_tftp.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_tftp.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_tftp.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_tftp.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,332 +0,0 @@ -# Copyright 2005-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). 
- -"""Tests for the maastftp Twisted plugin.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [] - -from functools import partial -import json -from os import path -from urllib import urlencode -from urlparse import ( - parse_qsl, - urlparse, - ) - -from maastesting.factory import factory -from maastesting.matchers import MockCalledOnceWith -from maastesting.testcase import MAASTestCase -import mock -from provisioningserver import tftp as tftp_module -from provisioningserver.boot import BytesReader -from provisioningserver.boot.pxe import PXEBootMethod -from provisioningserver.boot.tests.test_pxe import compose_config_path -from provisioningserver.tests.test_kernel_opts import make_kernel_parameters -from provisioningserver.tftp import ( - TFTPBackend, - TFTPService, - ) -from testtools.deferredruntest import AsynchronousDeferredRunTest -from testtools.matchers import ( - AfterPreprocessing, - AllMatch, - Equals, - IsInstance, - MatchesAll, - MatchesStructure, - ) -from tftp.backend import IReader -from tftp.protocol import TFTP -from twisted.application import internet -from twisted.application.service import MultiService -from twisted.internet import reactor -from twisted.internet.defer import ( - inlineCallbacks, - succeed, - ) -from twisted.python import context -from zope.interface.verify import verifyObject - - -class TestBytesReader(MAASTestCase): - """Tests for `provisioningserver.tftp.BytesReader`.""" - - def test_interfaces(self): - reader = BytesReader(b"") - self.addCleanup(reader.finish) - verifyObject(IReader, reader) - - def test_read(self): - data = factory.getRandomString(size=10).encode("ascii") - reader = BytesReader(data) - self.addCleanup(reader.finish) - self.assertEqual(data[:7], reader.read(7)) - self.assertEqual(data[7:], reader.read(7)) - self.assertEqual(b"", reader.read(7)) - - def test_finish(self): - reader = BytesReader(b"1234") - 
reader.finish() - self.assertRaises(ValueError, reader.read, 1) - - -class TestTFTPBackend(MAASTestCase): - """Tests for `provisioningserver.tftp.TFTPBackend`.""" - - run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=5) - - def test_init(self): - temp_dir = self.make_dir() - generator_url = "http://%s.example.com/%s" % ( - factory.make_name("domain"), factory.make_name("path")) - backend = TFTPBackend(temp_dir, generator_url) - self.assertEqual((True, False), (backend.can_read, backend.can_write)) - self.assertEqual(temp_dir, backend.base.path) - self.assertEqual(generator_url, backend.generator_url.geturl()) - - def test_get_generator_url(self): - # get_generator_url() merges the parameters obtained from the request - # file path (arch, subarch, name) into the configured generator URL. - mac = factory.getRandomMACAddress("-") - dummy = factory.make_name("dummy").encode("ascii") - backend_url = b"http://example.com/?" + urlencode({b"dummy": dummy}) - backend = TFTPBackend(self.make_dir(), backend_url) - # params is an example of the parameters obtained from a request. - params = {"mac": mac} - generator_url = urlparse(backend.get_generator_url(params)) - self.assertEqual("example.com", generator_url.hostname) - query = parse_qsl(generator_url.query) - query_expected = [ - ("dummy", dummy), - ("mac", mac), - ] - self.assertItemsEqual(query_expected, query) - - @inlineCallbacks - def test_get_reader_regular_file(self): - # TFTPBackend.get_reader() returns a regular FilesystemReader for - # paths not matching re_config_file. 
- data = factory.getRandomString().encode("ascii") - temp_file = self.make_file(name="example", contents=data) - temp_dir = path.dirname(temp_file) - backend = TFTPBackend(temp_dir, "http://nowhere.example.com/") - reader = yield backend.get_reader("example") - self.addCleanup(reader.finish) - self.assertEqual(len(data), reader.size) - self.assertEqual(data, reader.read(len(data))) - self.assertEqual(b"", reader.read(1)) - - @inlineCallbacks - def test_get_render_file(self): - # For paths matching PXEBootMethod.match_path, TFTPBackend.get_reader() - # returns a Deferred that will yield a BytesReader. - cluster_uuid = factory.getRandomUUID() - self.patch(tftp_module, 'get_cluster_uuid').return_value = ( - cluster_uuid) - mac = factory.getRandomMACAddress("-") - config_path = compose_config_path(mac) - backend = TFTPBackend(self.make_dir(), b"http://example.com/") - # python-tx-tftp sets up call context so that backends can discover - # more about the environment in which they're running. - call_context = { - "local": ( - factory.getRandomIPAddress(), - factory.getRandomPort()), - "remote": ( - factory.getRandomIPAddress(), - factory.getRandomPort()), - } - - @partial(self.patch, backend, "get_boot_method_reader") - def get_boot_method_reader(boot_method, params): - params_json = json.dumps(params) - params_json_reader = BytesReader(params_json) - return succeed(params_json_reader) - - reader = yield context.call( - call_context, backend.get_reader, config_path) - output = reader.read(10000) - # The addresses provided by python-tx-tftp in the call context are - # passed over the wire as address:port strings. - expected_params = { - "mac": mac, - "local": call_context["local"][0], # address only. - "remote": call_context["remote"][0], # address only. 
- "cluster_uuid": cluster_uuid, - } - observed_params = json.loads(output) - self.assertEqual(expected_params, observed_params) - - @inlineCallbacks - def test_get_boot_method_reader_returns_rendered_params(self): - # get_boot_method_reader() takes a dict() of parameters and returns an - # `IReader` of a PXE configuration, rendered by - # `PXEBootMethod.get_reader`. - backend = TFTPBackend(self.make_dir(), b"http://example.com/") - # Fake configuration parameters, as discovered from the file path. - fake_params = {"mac": factory.getRandomMACAddress("-")} - # Fake kernel configuration parameters, as returned from the API call. - fake_kernel_params = make_kernel_parameters() - - # Stub get_page to return the fake API configuration parameters. - fake_get_page_result = json.dumps(fake_kernel_params._asdict()) - get_page_patch = self.patch(backend, "get_page") - get_page_patch.return_value = succeed(fake_get_page_result) - - # Stub get_reader to return the render parameters. - method = PXEBootMethod() - fake_render_result = factory.make_name("render").encode("utf-8") - render_patch = self.patch(method, "get_reader") - render_patch.return_value = BytesReader(fake_render_result) - - # Get the rendered configuration, which will actually be a JSON dump - # of the render-time parameters. - reader = yield backend.get_boot_method_reader(method, fake_params) - self.addCleanup(reader.finish) - self.assertIsInstance(reader, BytesReader) - output = reader.read(10000) - - # The kernel parameters were fetched using `backend.get_page`. - self.assertThat(backend.get_page, MockCalledOnceWith(mock.ANY)) - - # The result has been rendered by `method.get_reader`. 
- self.assertEqual(fake_render_result.encode("utf-8"), output) - self.assertThat(method.get_reader, MockCalledOnceWith( - backend, kernel_params=fake_kernel_params, **fake_params)) - - @inlineCallbacks - def test_get_boot_method_render_substitutes_armhf_in_params(self): - # get_config_reader() should substitute "arm" for "armhf" in the - # arch field of the parameters (mapping from pxe to maas - # namespace). - cluster_uuid = factory.getRandomUUID() - self.patch(tftp_module, 'get_cluster_uuid').return_value = ( - cluster_uuid) - config_path = "pxelinux.cfg/default-arm" - backend = TFTPBackend(self.make_dir(), b"http://example.com/") - # python-tx-tftp sets up call context so that backends can discover - # more about the environment in which they're running. - call_context = { - "local": ( - factory.getRandomIPAddress(), - factory.getRandomPort()), - "remote": ( - factory.getRandomIPAddress(), - factory.getRandomPort()), - } - - @partial(self.patch, backend, "get_boot_method_reader") - def get_boot_method_reader(boot_method, params): - params_json = json.dumps(params) - params_json_reader = BytesReader(params_json) - return succeed(params_json_reader) - - reader = yield context.call( - call_context, backend.get_reader, config_path) - output = reader.read(10000) - observed_params = json.loads(output) - self.assertEqual("armhf", observed_params["arch"]) - - -class TestTFTPService(MAASTestCase): - - def test_tftp_service(self): - # A TFTP service is configured and added to the top-level service. 
- interfaces = [ - factory.getRandomIPAddress(), - factory.getRandomIPAddress(), - ] - self.patch( - tftp_module, "get_all_interface_addresses", - lambda: interfaces) - example_root = self.make_dir() - example_generator = "http://example.com/generator" - example_port = factory.getRandomPort() - tftp_service = TFTPService( - resource_root=example_root, generator=example_generator, - port=example_port) - tftp_service.updateServers() - # The "tftp" service is a multi-service containing UDP servers for - # each interface defined by get_all_interface_addresses(). - self.assertIsInstance(tftp_service, MultiService) - # There's also a TimerService that updates the servers every 45s. - self.assertThat( - tftp_service.refresher, MatchesStructure.byEquality( - step=45, parent=tftp_service, name="refresher", - call=(tftp_service.updateServers, (), {}), - )) - expected_backend = MatchesAll( - IsInstance(TFTPBackend), - AfterPreprocessing( - lambda backend: backend.base.path, - Equals(example_root)), - AfterPreprocessing( - lambda backend: backend.generator_url.geturl(), - Equals(example_generator))) - expected_protocol = MatchesAll( - IsInstance(TFTP), - AfterPreprocessing( - lambda protocol: protocol.backend, - expected_backend)) - expected_server = MatchesAll( - IsInstance(internet.UDPServer), - AfterPreprocessing( - lambda service: len(service.args), - Equals(2)), - AfterPreprocessing( - lambda service: service.args[0], # port - Equals(example_port)), - AfterPreprocessing( - lambda service: service.args[1], # protocol - expected_protocol)) - self.assertThat( - tftp_service.getServers(), - AllMatch(expected_server)) - # Only the interface used for each service differs. - self.assertItemsEqual( - [svc.kwargs for svc in tftp_service.getServers()], - [{"interface": interface} for interface in interfaces]) - - def test_tftp_service_rebinds_on_HUP(self): - # Initial set of interfaces to bind to. 
- interfaces = {"1.1.1.1", "2.2.2.2"} - self.patch( - tftp_module, "get_all_interface_addresses", - lambda: interfaces) - - tftp_service = TFTPService( - resource_root=self.make_dir(), generator="http://mighty/wind", - port=factory.getRandomPort()) - tftp_service.updateServers() - - # The child services of tftp_services are named after the - # interface they bind to. - self.assertEqual(interfaces, { - server.name for server in tftp_service.getServers() - }) - - # Update the set of interfaces to bind to. - interfaces.add("3.3.3.3") - interfaces.remove("1.1.1.1") - - # Ask the TFTP service to update its set of servers. - tftp_service.updateServers() - - # We're in the reactor thread but we want to move the reactor - # forwards, hence we need to get all explicit about it. - reactor.runUntilCurrent() - - # The interfaces now bound match the updated interfaces set. - self.assertEqual(interfaces, { - server.name for server in tftp_service.getServers() - }) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_udev.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_udev.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_udev.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_udev.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,89 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for udev rules generation code.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from provisioningserver.udev import ( + compose_network_interfaces_udev_rules, + compose_udev_attr_equality, + compose_udev_equality, + compose_udev_rule, + compose_udev_setting, + ) +from testtools.matchers import ContainsAll + + +class TestComposeUdevEquality(MAASTestCase): + + def test__generates_comparison_with_double_equals_sign(self): + self.assertEqual('KEY=="value"', compose_udev_equality('KEY', 'value')) + + def test__rejects_lower_case_letters_in_key(self): + self.assertRaises( + AssertionError, + compose_udev_equality, 'key', 'value') + + +class TestComposeUdevAttrEquality(MAASTestCase): + + def test__generates_comparison_with_double_equals_sign(self): + self.assertEqual( + 'ATTR{key}=="value"', + compose_udev_attr_equality('key', 'value')) + + def test__rejects_upper_case_letters_in_key(self): + self.assertRaises( + AssertionError, + compose_udev_attr_equality, 'KEY', 'value') + + +class TestComposeUdevSetting(MAASTestCase): + + def test__generates_assignment_with_single_equals_sign(self): + self.assertEqual('KEY="value"', compose_udev_setting('KEY', 'value')) + + def test__rejects_lower_case_letters_in_key(self): + self.assertRaises( + AssertionError, + compose_udev_setting, 'key', 'value') + + +class TestComposeUdevRule(MAASTestCase): + + def test__generates_rule(self): + interface = factory.make_name('eth') + mac = factory.make_mac_address() + expected_rule = ( + 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ' + 'ATTR{address}=="%(mac)s", NAME="%(interface)s"\n' + ) % {'mac': mac, 'interface': interface} + self.assertEqual(expected_rule, compose_udev_rule(interface, mac)) + + +class TestComposeNetworkInterfacesUdevRules(MAASTestCase): + + def 
test__generates_udev_rules(self): + interfaces = [ + (factory.make_name('eth'), factory.make_mac_address()) + for _ in range(2) + ] + + self.assertThat( + compose_network_interfaces_udev_rules(interfaces), + ContainsAll([ + 'ATTR{address}=="%s", NAME="%s"\n' % (interface, mac) + for mac, interface in interfaces + ])) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tests/test_upgrade_cluster.py maas-1.7.6+bzr3376/src/provisioningserver/tests/test_upgrade_cluster.py --- maas-1.5.4+bzr2294/src/provisioningserver/tests/test_upgrade_cluster.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tests/test_upgrade_cluster.py 2015-07-10 01:27:14.000000000 +0000 @@ -15,9 +15,9 @@ __all__ = [] from argparse import ArgumentParser -from os import makedirs +from itertools import product +import os import os.path -from textwrap import dedent from maastesting.factory import factory from maastesting.matchers import ( @@ -25,16 +25,19 @@ MockNotCalled, ) from maastesting.testcase import MAASTestCase +from maastesting.utils import sample_binary_data from mock import Mock -from provisioningserver import upgrade_cluster -from provisioningserver.config import BootConfig -from provisioningserver.testing.config import ( - BootConfigFixture, - ConfigFixture, +from provisioningserver import ( + config, + upgrade_cluster, ) +from provisioningserver.boot.tftppath import list_subdirs +from provisioningserver.utils.fs import read_text_file from testtools.matchers import ( + DirExists, FileContains, - StartsWith, + FileExists, + Not, ) @@ -54,6 +57,7 @@ def test_calls_hooks(self): upgrade_hook = Mock() + upgrade_hook.__name__ = "upgrade_hook" self.patch_upgrade_hooks([upgrade_hook]) self.run_command() self.assertThat(upgrade_hook, MockCalledOnceWith()) @@ -79,273 +83,278 @@ self.assertEqual(['first', 'middle', 'last'], calls) -class TestGenerateBootResourcesConfig(MAASTestCase): - """Tests for the `generate_boot_resources_config` upgrade.""" +class 
TestMakeMAASOwnBootResources(MAASTestCase): + """Tests for the `make_maas_own_boot_resources` upgrade.""" - def patch_rewrite_boot_resources_config(self): - """Patch `rewrite_boot_resources_config` with a mock.""" - return self.patch(upgrade_cluster, 'rewrite_boot_resources_config') - - def patch_boot_config(self, config): - """Replace the bootresources config with the given fake.""" - fixture = BootConfigFixture(config) - self.useFixture(fixture) - path = fixture.filename - self.patch(upgrade_cluster, 'locate_config').return_value = path - return path + def configure_storage(self, storage_dir): + """Create a storage config.""" + self.patch(config, 'BOOT_RESOURCES_STORAGE', storage_dir) + + def test__calls_chown_if_boot_resources_dir_exists(self): + self.patch(upgrade_cluster, 'check_call') + storage_dir = self.make_dir() + self.configure_storage(storage_dir) + upgrade_cluster.make_maas_own_boot_resources() + self.assertThat( + upgrade_cluster.check_call, + MockCalledOnceWith(['chown', '-R', 'maas', storage_dir])) - def test_hook_does_nothing_if_configure_me_is_False(self): - self.patch_boot_config({'boot': {'configure_me': False}}) - rewrite_config = self.patch_rewrite_boot_resources_config() - upgrade_cluster.generate_boot_resources_config() - self.assertThat(rewrite_config, MockNotCalled()) - - def test_hook_does_nothing_if_configure_me_is_missing(self): - self.patch_boot_config({'boot': {}}) - rewrite_config = self.patch_rewrite_boot_resources_config() - upgrade_cluster.generate_boot_resources_config() - self.assertThat(rewrite_config, MockNotCalled()) - - def test_hook_rewrites_if_configure_me_is_True(self): - config_file = self.patch_boot_config({'boot': {'configure_me': True}}) - rewrite_config = self.patch_rewrite_boot_resources_config() - upgrade_cluster.generate_boot_resources_config() - self.assertThat(rewrite_config, MockCalledOnceWith(config_file)) - - def test_find_old_imports_returns_empty_if_no_tftproot(self): - non_dir = 
os.path.join(self.make_dir(), factory.make_name('nonesuch')) - self.assertEqual(set(), upgrade_cluster.find_old_imports(non_dir)) - - def test_find_old_imports_returns_empty_if_tftproot_is_empty(self): - self.assertEqual( - set(), - upgrade_cluster.find_old_imports(self.make_dir())) - - def test_find_old_imports_finds_image(self): - tftproot = self.make_dir() - arch = factory.make_name('arch') - subarch = factory.make_name('subarch') - release = factory.make_name('release') - purpose = factory.make_name('purpose') - makedirs(os.path.join(tftproot, arch, subarch, release, purpose)) - self.assertEqual( - {(arch, subarch, release)}, - upgrade_cluster.find_old_imports(tftproot)) - - def test_generate_selections_returns_None_if_no_images_found(self): - self.assertIsNone(upgrade_cluster.generate_selections([])) - - def test_generate_selections_matches_image(self): - arch = factory.make_name('arch') - subarch = factory.make_name('subarch') - release = factory.make_name('release') - self.assertEqual( - [ - { - 'release': release, - 'arches': [arch], - 'subarches': [subarch], - }, - ], - upgrade_cluster.generate_selections([(arch, subarch, release)])) - - def test_generate_selections_sorts_output(self): - images = [ - ( - factory.make_name('arch'), - factory.make_name('subarch'), - factory.make_name('release'), - ) - for _ in range(3) - ] - self.assertEqual( - upgrade_cluster.generate_selections(sorted(images)), - upgrade_cluster.generate_selections(sorted(images, reverse=True))) - - def test_generate_updated_config_clears_configure_me_if_no_images(self): - config = {'boot': {'configure_me': True, 'sources': []}} - self.assertNotIn( - 'configure_me', - upgrade_cluster.generate_updated_config(config, None)['boot']) - - def test_generate_updated_config_clears_configure_me_if_has_images(self): - image = ( - factory.make_name('arch'), - factory.make_name('subarch'), - factory.make_name('release'), - ) - config = {'boot': {'configure_me': True, 'sources': []}} - 
self.assertNotIn( - 'configure_me', - upgrade_cluster.generate_updated_config(config, [image])['boot']) - - def test_generate_updated_config_leaves_static_entries_intact(self): - storage = factory.make_name('storage') - path = factory.make_name('path') - keyring = factory.make_name('keyring') - config = { - 'boot': { - 'configure_me': True, - 'storage': storage, - 'sources': [ - { - 'path': path, - 'keyring': keyring, - }, - ], - }, - } - # Set configure_me; generate_updated_config expects it. - config['boot']['configure_me'] = True - - result = upgrade_cluster.generate_updated_config(config, []) - self.assertEqual(storage, result['boot']['storage']) - self.assertEqual(path, result['boot']['sources'][0]['path']) - self.assertEqual(keyring, result['boot']['sources'][0]['keyring']) - - def test_generate_updated_config_updates_sources(self): - arch = factory.make_name('arch') - subarch = factory.make_name('subarch') - release = factory.make_name('release') - path1 = factory.make_name('path') - path2 = factory.make_name('path') - config = { - 'boot': { - 'configure_me': True, - # There are two sources. Both will have their selections set. 
- 'sources': [ - {'path': path1}, - {'path': path2} - ], - }, - } - result = upgrade_cluster.generate_updated_config( - config, [(arch, subarch, release)]) - self.assertEqual( - [ - { - 'path': path1, - 'selections': [ - { - 'release': release, - 'arches': [arch], - 'subarches': [subarch], - }, - ], - }, - { - 'path': path2, - 'selections': [ - { - 'release': release, - 'arches': [arch], - 'subarches': [subarch], - }, - ], - }, - ], - result['boot']['sources']) - - def test_generate_updated_config_does_not_touch_sources_if_no_images(self): - path = factory.make_name('path') - arches = [factory.make_name('arch') for _ in range(2)] - config = { - 'boot': { - 'configure_me': True, - 'sources': [ - { - 'path': path, - 'selections': [{'arches': arches}], - }, - ], - }, - } - no_images = set() - result = upgrade_cluster.generate_updated_config(config, no_images) - self.assertEqual( - [ - { - 'path': path, - 'selections': [{'arches': arches}], - }, - ], - result['boot']['sources']) - - def test_extract_top_comment_reads_up_to_first_non_comment_text(self): - header = dedent("""\ - # Comment. - - # Comment after blank line. - # Indented comment. - """) - filename = self.make_file(contents=(header + 'text#')) - self.assertEqual(header, upgrade_cluster.extract_top_comment(filename)) - - def test_update_config_file_rewrites_file_in_place(self): - old_storage = factory.make_name('old') - new_storage = factory.make_name('new') - original_file = dedent("""\ - # Top comment. - boot: - configure_me: True - storage: %s - """) % old_storage - expected_file = dedent("""\ - # Top comment. 
- boot: - storage: %s - """) % new_storage - config_file = self.make_file(contents=original_file) - - upgrade_cluster.update_config_file( - config_file, {'boot': {'storage': new_storage}}) - - self.assertThat(config_file, FileContains(expected_file)) - - def test_update_config_file_flushes_config_cache(self): - self.patch(BootConfig, 'flush_cache') - config_file = self.make_file() - upgrade_cluster.update_config_file(config_file, {}) + def test__skips_chown_if_boot_resources_dir_does_not_exist(self): + self.patch(upgrade_cluster, 'check_call') + storage_dir = os.path.join(self.make_dir(), factory.make_name('none')) + self.configure_storage(storage_dir) + upgrade_cluster.make_maas_own_boot_resources() + self.assertThat(upgrade_cluster.check_call, MockNotCalled()) + + +class TestCreateGNUPGHome(MAASTestCase): + """Tests for `create_gnupg_home`.""" + + def make_nonexistent_path(self, parent_dir): + """Return an as-yet nonexistent path, inside `parent_dir`.""" + return os.path.join(parent_dir, factory.make_name('gpghome')) + + def patch_gnupg_home(self, gpghome): + self.patch(upgrade_cluster, 'get_maas_user_gpghome').return_value = ( + gpghome) + + def patch_call(self): + return self.patch(upgrade_cluster, 'check_call') + + def test__succeeds_if_directory_exists(self): + existing_home = self.make_dir() + self.patch_gnupg_home(existing_home) + self.patch_call() + upgrade_cluster.create_gnupg_home() + self.assertEqual([], os.listdir(existing_home)) + + def test__creates_directory(self): + parent = self.make_dir() + new_home = self.make_nonexistent_path(parent) + self.patch_gnupg_home(new_home) + self.patch_call() + upgrade_cluster.create_gnupg_home() + self.assertThat(new_home, DirExists()) + + def test__sets_ownership_to_maas_if_running_as_root(self): + parent = self.make_dir() + new_home = self.make_nonexistent_path(parent) + self.patch_gnupg_home(new_home) + call = self.patch_call() + self.patch(os, 'geteuid').return_value = 0 + upgrade_cluster.create_gnupg_home() 
self.assertThat( - BootConfig.flush_cache, MockCalledOnceWith(config_file)) + call, MockCalledOnceWith(['chown', 'maas:maas', new_home])) - def test_rewrite_boot_resources_config_integrates(self): - tftproot = self.make_dir() - # Fake pre-existing images in a pre-Simplestreams TFTP directory tree. - self.useFixture(ConfigFixture({'tftp': {'root': tftproot}})) - arch = factory.make_name('arch') - subarch = factory.make_name('subarch') - release = factory.make_name('release') - purpose = factory.make_name('purpose') - makedirs(os.path.join(tftproot, arch, subarch, release, purpose)) - - config_file = self.make_file(contents=dedent("""\ - # Boot resources configuration file. - # - # Configuration follows. - - boot: - # This setting will be removed during rewrite. - configure_me: True - - storage: "/var/lib/maas/boot-resources/" - - sources: - - path: "http://maas.ubuntu.com/images/somewhere" - keyring: "/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg" - - selections: - - release: "trusty" - """)) + def test__does_not_set_ownership_if_not_running_as_root(self): + parent = self.make_dir() + new_home = self.make_nonexistent_path(parent) + self.patch_gnupg_home(new_home) + call = self.patch_call() + self.patch(os, 'geteuid').return_value = 101 + upgrade_cluster.create_gnupg_home() + self.assertThat(call, MockNotCalled()) + + +class TestRetireBootResourcesYAML(MAASTestCase): + """Tests for `retire_bootresources_yaml`.""" + + def set_bootresources_yaml(self, contents): + """Write a fake `bootresources.yaml`, and return its path.""" + path = self.make_file('bootresources.yaml', contents=contents) + self.patch(upgrade_cluster, 'BOOTRESOURCES_FILE', path) + return path + + def test__does_nothing_if_file_not_present(self): + path = self.set_bootresources_yaml('') + os.remove(path) + upgrade_cluster.retire_bootresources_yaml() + self.assertThat(path, Not(FileExists())) + + def test__prefixes_header_to_file_if_present(self): + content = factory.make_string() + path = 
self.set_bootresources_yaml(content) + upgrade_cluster.retire_bootresources_yaml() + self.assertThat( + path, + FileContains(upgrade_cluster.BOOTRESOURCES_WARNING + content)) - upgrade_cluster.rewrite_boot_resources_config(config_file) + def test__is_idempotent(self): + path = self.set_bootresources_yaml(factory.make_string()) + upgrade_cluster.retire_bootresources_yaml() + content_after_upgrade = read_text_file(path) + upgrade_cluster.retire_bootresources_yaml() + self.assertThat(path, FileContains(content_after_upgrade)) + + def test__survives_encoding_problems(self): + path = os.path.join(self.make_dir(), 'bootresources.yaml') + content = b'[[%s]]' % sample_binary_data + with open(path, 'wb') as config: + config.write(content) + self.patch(upgrade_cluster, 'BOOTRESOURCES_FILE', path) + upgrade_cluster.retire_bootresources_yaml() + self.assertThat( + path, + FileContains( + upgrade_cluster.BOOTRESOURCES_WARNING.encode('ascii') + + content)) + + +class TestMigrateArchitecturesIntoUbuntuDirectory(MAASTestCase): + """Tests for the `migrate_architectures_into_ubuntu_directory` upgrade.""" + + def configure_storage(self, storage_dir, make_current_dir=True): + """Create a storage config.""" + if make_current_dir: + current_dir = os.path.join(storage_dir, "current") + os.mkdir(current_dir) + self.patch(config, 'BOOT_RESOURCES_STORAGE', storage_dir) + + def test__list_subdirs_under_current_directory(self): + self.patch(upgrade_cluster, 'list_subdirs').return_value = ['ubuntu'] + storage_dir = self.make_dir() + self.configure_storage(storage_dir) + upgrade_cluster.migrate_architectures_into_ubuntu_directory() + self.assertThat( + upgrade_cluster.list_subdirs, + MockCalledOnceWith(os.path.join(storage_dir, "current"))) + def test__exits_early_if_boot_resources_dir_does_not_exist(self): + # Patch list_subdirs, if it gets called then the method did not + # exit early. 
+ self.patch(upgrade_cluster, 'list_subdirs') + storage_dir = os.path.join(self.make_dir(), factory.make_name('none')) + self.configure_storage(storage_dir, make_current_dir=False) + upgrade_cluster.migrate_architectures_into_ubuntu_directory() + self.assertThat(upgrade_cluster.list_subdirs, MockNotCalled()) + + def test__exits_early_if_current_dir_does_not_exist(self): + # Patch list_subdirs, if it gets called then the method did not + # exit early. + self.patch(upgrade_cluster, 'list_subdirs') + storage_dir = self.make_dir() + self.configure_storage(storage_dir, make_current_dir=False) + upgrade_cluster.migrate_architectures_into_ubuntu_directory() + self.assertThat(upgrade_cluster.list_subdirs, MockNotCalled()) + + def test__exits_early_if_ubuntu_dir_exist(self): + # Patch drill_down, if it gets called then the method did not + # exit early. + self.patch(upgrade_cluster, 'drill_down') + storage_dir = self.make_dir() + self.configure_storage(storage_dir) + os.mkdir(os.path.join(storage_dir, 'current', 'ubuntu')) + upgrade_cluster.migrate_architectures_into_ubuntu_directory() + self.assertThat(upgrade_cluster.drill_down, MockNotCalled()) + + def test__doesnt_create_ubuntu_dir_when_no_valid_directories(self): + storage_dir = self.make_dir() + self.configure_storage(storage_dir) + upgrade_cluster.migrate_architectures_into_ubuntu_directory() + self.assertFalse( + os.path.exists(os.path.join(storage_dir, 'current', 'ubuntu'))) + + def test__moves_paths_with_correct_levels_into_ubuntu_dir(self): + storage_dir = self.make_dir() + self.configure_storage(storage_dir) + arches = [factory.make_name('arch') for _ in range(3)] + subarches = [factory.make_name('subarch') for _ in range(3)] + releases = [factory.make_name('release') for _ in range(3)] + labels = [factory.make_name('label') for _ in range(3)] + for arch, subarch, release, label in product( + arches, subarches, releases, labels): + os.makedirs( + os.path.join( + storage_dir, 'current', arch, subarch, release, 
label)) + self.patch(upgrade_cluster, 'update_targets_conf') + upgrade_cluster.migrate_architectures_into_ubuntu_directory() + self.assertItemsEqual( + arches, + list_subdirs(os.path.join(storage_dir, 'current', 'ubuntu'))) + + def test__doesnt_move_paths_with_fewer_levels_into_ubuntu_dir(self): + storage_dir = self.make_dir() + self.configure_storage(storage_dir) + arches = [factory.make_name('arch') for _ in range(3)] + subarches = [factory.make_name('subarch') for _ in range(3)] + releases = [factory.make_name('release') for _ in range(3)] + # Labels directory is missing, causing none of the folders to move + for arch, subarch, release in product( + arches, subarches, releases): + os.makedirs( + os.path.join(storage_dir, 'current', arch, subarch, release)) + move_arch = factory.make_name('arch') + os.makedirs( + os.path.join( + storage_dir, + 'current', + move_arch, + factory.make_name('subarch'), + factory.make_name('release'), + factory.make_name('label'))) + self.patch(upgrade_cluster, 'update_targets_conf') + upgrade_cluster.migrate_architectures_into_ubuntu_directory() + self.assertItemsEqual( + [move_arch], + list_subdirs(os.path.join(storage_dir, 'current', 'ubuntu'))) + + def test__doesnt_move_paths_with_more_levels_into_ubuntu_dir(self): + storage_dir = self.make_dir() + self.configure_storage(storage_dir) + # Extra directory level, this is what it looks like after upgrade. 
+ osystems = [factory.make_name('arch') for _ in range(3)] + arches = [factory.make_name('arch') for _ in range(3)] + subarches = [factory.make_name('subarch') for _ in range(3)] + releases = [factory.make_name('release') for _ in range(3)] + labels = [factory.make_name('label') for _ in range(3)] + for osystem, arch, subarch, release, label in product( + osystems, arches, subarches, releases, labels): + os.makedirs( + os.path.join( + storage_dir, 'current', osystem, arch, subarch, + release, label)) + move_arch = factory.make_name('arch') + os.makedirs( + os.path.join( + storage_dir, + 'current', + move_arch, + factory.make_name('subarch'), + factory.make_name('release'), + factory.make_name('label'))) + self.patch(upgrade_cluster, 'update_targets_conf') + upgrade_cluster.migrate_architectures_into_ubuntu_directory() + self.assertItemsEqual( + [move_arch], + list_subdirs(os.path.join(storage_dir, 'current', 'ubuntu'))) + + def setup_working_migration_scenario(self): + storage_dir = self.make_dir() + self.configure_storage(storage_dir) + arches = [factory.make_name('arch') for _ in range(3)] + subarches = [factory.make_name('subarch') for _ in range(3)] + releases = [factory.make_name('release') for _ in range(3)] + labels = [factory.make_name('label') for _ in range(3)] + for arch, subarch, release, label in product( + arches, subarches, releases, labels): + os.makedirs( + os.path.join( + storage_dir, 'current', arch, subarch, release, label)) + return storage_dir + + def test__calls_write_targets_conf_with_current_dir(self): + storage_dir = self.setup_working_migration_scenario() + mock_write = self.patch(upgrade_cluster, 'write_targets_conf') + self.patch(upgrade_cluster, 'update_targets_conf') + upgrade_cluster.migrate_architectures_into_ubuntu_directory() self.assertThat( - config_file, - FileContains(matcher=StartsWith(dedent("""\ - # Boot resources configuration file. - # - # Configuration follows. 
+ mock_write, + MockCalledOnceWith(os.path.join(storage_dir, 'current'))) - boot: - """)))) + def test__calls_update_targets_conf_with_current_dir(self): + storage_dir = self.setup_working_migration_scenario() + mock_update = self.patch(upgrade_cluster, 'update_targets_conf') + upgrade_cluster.migrate_architectures_into_ubuntu_directory() + self.assertThat( + mock_update, + MockCalledOnceWith(os.path.join(storage_dir, 'current'))) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/tftp.py maas-1.7.6+bzr3376/src/provisioningserver/tftp.py --- maas-1.5.4+bzr2294/src/provisioningserver/tftp.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/tftp.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,258 +0,0 @@ -# Copyright 2012-2014 Canonical Ltd. This software is licensed under the -# GNU Affero General Public License version 3 (see the file LICENSE). - -"""Twisted Application Plugin for the MAAS TFTP server.""" - -from __future__ import ( - absolute_import, - print_function, - unicode_literals, - ) - -str = None - -__metaclass__ = type -__all__ = [ - "TFTPBackend", - "TFTPService", - ] - -import httplib -import json -from urllib import urlencode -from urlparse import ( - parse_qsl, - urlparse, - ) - -from provisioningserver.boot import BootMethodRegistry -from provisioningserver.cluster_config import get_cluster_uuid -from provisioningserver.driver import ArchitectureRegistry -from provisioningserver.kernel_opts import KernelParameters -from provisioningserver.utils import ( - deferred, - get_all_interface_addresses, - ) -from tftp.backend import FilesystemSynchronousBackend -from tftp.errors import FileNotFound -from tftp.protocol import TFTP -from twisted.application import internet -from twisted.application.service import MultiService -from twisted.python.context import get -from twisted.web.client import getPage -import twisted.web.error - - -class TFTPBackend(FilesystemSynchronousBackend): - """A partially dynamic read-only TFTP 
server. - - Static files such as kernels and initrds, as well as any non-MAAS files - that the system may already be set up to serve, are served up normally. - But PXE configurations are generated on the fly. - - When a PXE configuration file is requested, the server asynchronously - requests the appropriate parameters from the API (at a configurable - "generator URL") and generates a config file based on those. - - The regular expressions `re_config_file` and `re_mac_address` specify - which files the server generates on the fly. Any other requests are - passed on to the filesystem. - - Passing requests on to the API must be done very selectively, because - failures cause the boot process to halt. This is why the expression for - matching the MAC address is so narrowly defined: PXELINUX attempts to - fetch files at many similar paths which must not be passed on. - """ - - get_page = staticmethod(getPage) - - def __init__(self, base_path, generator_url): - """ - :param base_path: The root directory for this TFTP server. - :param generator_url: The URL which can be queried for the PXE - config. See `get_generator_url` for the types of queries it is - expected to accept. - """ - super(TFTPBackend, self).__init__( - base_path, can_read=True, can_write=False) - self.generator_url = urlparse(generator_url) - - def get_generator_url(self, params): - """Calculate the URL, including query, from which we can fetch - additional configuration parameters. - - :param params: A dict, or iterable suitable for updating a dict, of - additional query parameters. - """ - query = {} - # Merge parameters from the generator URL. - query.update(parse_qsl(self.generator_url.query)) - # Merge parameters obtained from the request. - query.update(params) - # Merge updated query into the generator URL. - url = self.generator_url._replace(query=urlencode(query)) - # TODO: do something more intelligent with unicode URLs here; see - # apiclient.utils.ascii_url() for inspiration. 
- return url.geturl().encode("ascii") - - def get_boot_method(self, file_name): - """Finds the correct boot method.""" - for _, method in BootMethodRegistry: - params = method.match_path(self, file_name) - if params is not None: - return method, params - return None, None - - @deferred - def get_kernel_params(self, params): - """Return kernel parameters obtained from the API. - - :param params: Parameters so far obtained, typically from the file - path requested. - :return: A `KernelParameters` instance. - """ - url = self.get_generator_url(params) - - def reassemble(data): - return KernelParameters(**data) - - d = self.get_page(url) - d.addCallback(json.loads) - d.addCallback(reassemble) - return d - - @deferred - def get_boot_method_reader(self, boot_method, params): - """Return an `IReader` for a boot method. - - :param boot_method: Boot method that is generating the config - :param params: Parameters so far obtained, typically from the file - path requested. - """ - def generate(kernel_params): - return boot_method.get_reader( - self, kernel_params=kernel_params, **params) - - d = self.get_kernel_params(params) - d.addCallback(generate) - return d - - @staticmethod - def get_page_errback(failure, file_name): - failure.trap(twisted.web.error.Error) - # This twisted.web.error.Error.status object ends up being a - # string for some reason, but the constants we can compare against - # (both in httplib and twisted.web.http) are ints. - try: - status_int = int(failure.value.status) - except ValueError: - # Assume that it's some other error and propagate it - return failure - - if status_int == httplib.NO_CONTENT: - # Convert HTTP No Content to a TFTP file not found - raise FileNotFound(file_name) - else: - # Otherwise propogate the unknown error - return failure - - @deferred - def get_reader(self, file_name): - """See `IBackend.get_reader()`. - - If `file_name` matches a boot method then the response is obtained - from that boot method. 
Otherwise the filesystem is used to service - the response. - """ - boot_method, params = self.get_boot_method(file_name) - if boot_method is None: - return super(TFTPBackend, self).get_reader(file_name) - - # Map pxe namespace architecture names to MAAS's. - arch = params.get("arch") - if arch is not None: - maasarch = ArchitectureRegistry.get_by_pxealias(arch) - if maasarch is not None: - params["arch"] = maasarch.name.split("/")[0] - - # Send the local and remote endpoint addresses. - local_host, local_port = get("local", (None, None)) - params["local"] = local_host - remote_host, remote_port = get("remote", (None, None)) - params["remote"] = remote_host - params["cluster_uuid"] = get_cluster_uuid() - d = self.get_boot_method_reader(boot_method, params) - d.addErrback(self.get_page_errback, file_name) - return d - - -class TFTPService(MultiService, object): - """An umbrella service representing a set of running TFTP servers. - - Creates a UDP server individually for each discovered network - interface, so that we can detect the interface via which we have - received a datagram. - - It then periodically updates the servers running in case there's a - change to the host machine's network configuration. - - :ivar backend: The :class:`TFTPBackend` being used to service TFTP - requests. - - :ivar port: The port on which each server is started. - - :ivar refresher: A :class:`TimerService` that calls - ``updateServers`` periodically. - - """ - - def __init__(self, resource_root, port, generator): - """ - :param resource_root: The root directory for this TFTP server. - :param port: The port on which each server should be started. - :param generator: The URL to be queried for PXE configuration. - This will normally point to the `pxeconfig` endpoint on the - region-controller API. 
- """ - super(TFTPService, self).__init__() - self.backend = TFTPBackend(resource_root, generator) - self.port = port - # Establish a periodic call to self.updateServers() every 45 - # seconds, so that this service eventually converges on truth. - # TimerService ensures that a call is made to it's target - # function immediately as it's started, so there's no need to - # call updateServers() from here. - self.refresher = internet.TimerService(45, self.updateServers) - self.refresher.setName("refresher") - self.refresher.setServiceParent(self) - - def getServers(self): - """Return a set of all configured servers. - - :rtype: :class:`set` of :class:`internet.UDPServer` - """ - return { - service for service in self - if service is not self.refresher - } - - def updateServers(self): - """Run a server on every interface. - - For each configured network interface this will start a TFTP - server. If called later it will bring up servers on newly - configured interfaces and bring down servers on deconfigured - interfaces. - """ - addrs_established = set(service.name for service in self.getServers()) - addrs_desired = set(get_all_interface_addresses()) - - for address in addrs_desired - addrs_established: - tftp_service = internet.UDPServer( - self.port, TFTP(self.backend), interface=address) - tftp_service.setName(address) - tftp_service.setServiceParent(self) - - for address in addrs_established - addrs_desired: - tftp_service = self.getServiceNamed(address) - tftp_service.disownServiceParent() diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/udev.py maas-1.7.6+bzr3376/src/provisioningserver/udev.py --- maas-1.5.4+bzr2294/src/provisioningserver/udev.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/udev.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,80 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Code to generate `udev` rules.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'compose_network_interfaces_udev_rules', + ] + +from textwrap import dedent + + +def compose_udev_equality(key, value): + """Return a udev comparison clause, like `ACTION=="add"`.""" + assert key == key.upper() + return '%s=="%s"' % (key, value) + + +def compose_udev_attr_equality(attribute, value): + """Return a udev attribute comparison clause, like `ATTR{type}=="1"`.""" + assert attribute == attribute.lower() + return 'ATTR{%s}=="%s"' % (attribute, value) + + +def compose_udev_setting(key, value): + """Return a udev assignment clause, like `NAME="eth0"`.""" + assert key == key.upper() + return '%s="%s"' % (key, value) + + +def compose_udev_rule(interface, mac): + """Return a udev rule to set the name of network interface with `mac`. + + The rule ends up as a single line looking something like: + + SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", + ATTR{address}="ff:ee:dd:cc:bb:aa", NAME="eth0" + + (Note the difference between `=` and `==`: they both occur.) + """ + rule = ', '.join([ + compose_udev_equality('SUBSYSTEM', 'net'), + compose_udev_equality('ACTION', 'add'), + compose_udev_equality('DRIVERS', '?*'), + compose_udev_attr_equality('address', mac), + compose_udev_setting('NAME', interface), + ]) + return '%s\n' % rule + + +def compose_network_interfaces_udev_rules(interfaces): + """Return text for a udev persistent-net rules file. + + These rules assign fixed names to network interfaces. They ensure that + the same network interface cards come up with the same interface names on + every boot. Otherwise, the kernel may assign interface names in different + orders on every boot, and so network interfaces can "switch identities" + every other time the machine reboots. + + :param interfaces: List of tuples of interface name and MAC address. 
+ :return: Text to write into a udev rules file. + """ + rules = [ + compose_udev_rule(interface, mac) + for interface, mac in interfaces + ] + return dedent("""\ + # MAAS-assigned network interface names. + %s + # End of MAAS-assigned network interface names. + """) % '\n\n'.join(rules) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/upgrade_cluster.py maas-1.7.6+bzr3376/src/provisioningserver/upgrade_cluster.py --- maas-1.5.4+bzr2294/src/provisioningserver/upgrade_cluster.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/upgrade_cluster.py 2015-07-10 01:27:14.000000000 +0000 @@ -32,157 +32,181 @@ 'run', ] -from logging import getLogger -import os.path - -from provisioningserver.boot.tftppath import drill_down -from provisioningserver.config import ( - BootConfig, - Config, +import os +from os import makedirs +import shutil +from subprocess import check_call +from textwrap import dedent + +from provisioningserver import config +from provisioningserver.auth import get_maas_user_gpghome +from provisioningserver.boot.tftppath import ( + drill_down, + list_subdirs, ) -from provisioningserver.utils import ( - atomic_write, - locate_config, - read_text_file, +from provisioningserver.import_images.boot_resources import ( + update_targets_conf, + write_targets_conf, ) -import yaml - - -logger = getLogger(__name__) - +from provisioningserver.logger import get_maas_logger -def find_old_imports(tftproot): - """List pre-Simplestreams boot images. - - Supports the `generate_boot_resources_config` upgrade hook. Returns a set - of tuples (arch, subarch, release) describing all of the images found. 
- """ - if not os.path.isdir(tftproot): - return set() - paths = [[tftproot]] - for level in ['arch', 'subarch', 'release', 'purpose']: - paths = drill_down(tftproot, paths) - return { - (arch, subarch, release) - for [root, arch, subarch, release, purpose] in paths - } - - -def generate_selections(images): - """Generate `selections` stanzas to match pre-existing boot images. - - Supports the `generate_boot_resources_config` upgrade hook. - - :param images: An iterable of (arch, subarch, release) tuples as returned - by `find_old_imports`. - :return: A list of dicts, each describing one `selections` stanza for the - `bootresources.yaml` file. - """ - if len(images) == 0: - # No old images found. - return None - else: - # Return one "selections" stanza per image. This could be cleverer - # and combine multiple architectures/subarchitectures, but there would - # have to be a clear gain. Simple is good. - return [ - { - 'release': release, - 'arches': [arch], - 'subarches': [subarch], - } - for arch, subarch, release in sorted(images) - ] - - -def generate_updated_config(config, old_images): - """Return an updated version of a config dict. - - Supports the `generate_boot_resources_config` upgrade hook. - - This clears the `configure_me` flag, and replaces all sources' - `selections` stanzas with ones based on the old boot images. - - :param config: A config dict, as loaded from `bootresources.yaml`. - :param old_images: Old-style boot images, as returned by - `find_old_imports`. If `None`, the existing `selections` are left - unchanged. - :return: An updated version of `config` with the above changes. - """ - config = config.copy() - # Remove the configure_me item. It's there exactly to tell us that we - # haven't done this rewrite yet. - del config['boot']['configure_me'] - if old_images is None: - return config - - # If we found old images, rewrite the selections. 
- if len(old_images) != 0: - new_selections = generate_selections(old_images) - for source in config['boot']['sources']: - source['selections'] = new_selections - return config +maaslog = get_maas_logger("cluster_upgrade") -def extract_top_comment(input_file): - """Return just the comment at the top of `input_file`. - Supports the `generate_boot_resources_config` upgrade hook. - """ - lines = [] - for line in read_text_file(input_file).splitlines(): - stripped_line = line.lstrip() - if stripped_line != '' and not stripped_line.startswith('#'): - # Not an empty line or comment any more. Stop. - break - lines.append(line) - return '\n'.join(lines) + '\n' - - -def update_config_file(config_file, new_config): - """Replace configuration data in `config_file` with `new_config`. - - Supports the `generate_boot_resources_config` upgrade hook. - - The first part of the config file, up to the first text that isn't a - comment, is kept intact. The part after that is overwritten with YAML - for the new configuration. - """ - header = extract_top_comment(config_file) - data = yaml.safe_dump(new_config, default_flow_style=False) - content = (header + data).encode('utf-8') - atomic_write(content, config_file, mode=0644) - BootConfig.flush_cache(config_file) - - -def rewrite_boot_resources_config(config_file): - """Rewrite the `bootresources.yaml` configuration. - - Supports the `generate_boot_resources_config` upgrade hook. - """ - # Look for images using the old tftp root setting, not the tftp - # resource_root setting. The latter points to where the newer, - # Simplestreams-based boot images live. - # This should be the final use of the old tftp root setting. After this - # has run, it serves no more purpose. 
- tftproot = Config.load_from_cache()['tftp']['root'] - config = BootConfig.load_from_cache(config_file) - old_images = find_old_imports(tftproot) - new_config = generate_updated_config(config, old_images) - update_config_file(config_file, new_config) - - -def generate_boot_resources_config(): - """Upgrade hook: rewrite `bootresources.yaml` based on boot images. - - This finds boot images downloaded into the old, pre-Simplestreams tftp - root, and writes a boot-resources configuration to import a similar set of - images using Simplestreams. - """ - config_file = locate_config('bootresources.yaml') - boot_resources = BootConfig.load_from_cache(config_file) - if boot_resources['boot'].get('configure_me', False): - rewrite_boot_resources_config(config_file) +def make_maas_own_boot_resources(): + """Upgrade hook: make the `maas` user the owner of the boot resources.""" + # This reduces the privileges required for importing and managing images. + if os.path.isdir(config.BOOT_RESOURCES_STORAGE): + check_call(['chown', '-R', 'maas', config.BOOT_RESOURCES_STORAGE]) + + +def create_gnupg_home(): + """Upgrade hook: create maas user's GNUPG home directory.""" + gpghome = get_maas_user_gpghome() + if not os.path.isdir(gpghome): + makedirs(gpghome) + if os.geteuid() == 0: + # Make the maas user the owner of its GPG home. Do this only if + # running as root; otherwise it would probably fail. We want to + # be able to start a development instance without triggering that. + check_call(['chown', 'maas:maas', gpghome]) + + +# Path to obsolete boot-resources configuration. +BOOTRESOURCES_FILE = '/etc/maas/bootresources.yaml' + +# Recognisable header, to be prefixed to BOOTRESOURCES_FILE as part of the +# warning that the file is obsolete. The retire_bootresources_yaml upgrade +# hook will prefix this header and further details to the file, if and only +# if this header is not yet present. +BOOTRESOURCES_HEADER = "# THIS FILE IS OBSOLETE." 
+ +# Warning, to be prefixed to BOOTRESOURCES_FILE as an indication that the +# file is obsolete. +BOOTRESOURCES_WARNING = BOOTRESOURCES_HEADER + '\n' + dedent("""\ + # + # The configuration below is no longer in use, and can be removed. + # By default, cluster controllers now import images for all supported + # Ubuntu LTS releases in all supported architectures. + # + # Imports can now be configured through the MAAS region controller API: + # See http://maas.ubuntu.com/docs/api.html#boot-source + # + # To do this, define a boot source through a POST to the nodegroup's + # boot-sources endpoint + # (e.g. http:///api/1.0/nodegroups//boot-sources), and then + # POST to the resulting boot source to define selections. Each cluster + # can have any number of boot sources, and each boot source can have any + # number of selections, as in the old configuration. + # + # The same thing can be done using the command-line front-end for the API. + # After logging in to the MAAS to create a profile, run: + # + # maas boot-sources create \ + url= keyring_filename= + # + # Here, + # * is your login profile in the 'maas' command. + # * is the UUID of the cluster. + # * is the source's path as found in this config file. + # * is the keyring entry as found in this config file. + # + # Full documentation can be found at http://maas.ubuntu.com/docs/ + # + # The maas-import-pxe-files import script is now deprecated; use the + # MAAS web UI, region-controller, or the "maas" command to trigger any + # manual imports. + # + # If you do wish to continue using maas-import-pxe-files for the time + # being, the script now requires a sources definition consisting of + # just the contents of the "sources" section as found in this + # configuration file. See the script's man page for an example. + """) + '\n' + + +def retire_bootresources_yaml(): + """Upgrade hook: mark `/etc/maas/bootresources.yaml` as obsolete. + + Prefixes `BOOTRESOURCES_WARNING` to the config file, if present. 
+ + This file was temporarily used in MAAS 1.5 to let users restrict which + boot resources should be downloaded, where from, and to where in the + filesystem. The settings have been replaced with model classes. + """ + if not os.path.isfile(BOOTRESOURCES_FILE): + return + header = BOOTRESOURCES_HEADER.encode('ascii') + warning = BOOTRESOURCES_WARNING.encode('ascii') + with open(BOOTRESOURCES_FILE, 'r+b') as old_config: + old_contents = old_config.read() + if old_contents.startswith(header): + # Warning is already there. + return + old_config.seek(0) + old_config.write(warning) + old_config.write(old_contents) + + +def filter_out_directories_with_extra_levels(paths): + """Remove paths that contain directories with more levels. We don't want + to move other operating systems under the ubuntu directory.""" + for arch, subarch, release, label in paths: + path = os.path.join( + config.BOOT_RESOURCES_STORAGE, 'current', + arch, subarch, release, label) + if len(list_subdirs(path)) == 0: + yield (arch, subarch, release, label) + + +def migrate_architectures_into_ubuntu_directory(): + """Upgrade hook: move architecture folders under the ubuntu folder. + + With the support of multiple operating systems the structure of the + boot resources directory added another level to the hierarchy. Previously + the hierarchy was arch/subarch/release/label, it has now been modified to + os/arch/subarch/release/label. + + Before multiple operating systems only Ubuntu was supported. Check if + folders have structure arch/subarch/release/label and move them into + ubuntu folder. Making the final path ubuntu/arch/subarch/release/label. + """ + current_dir = os.path.join(config.BOOT_RESOURCES_STORAGE, "current") + if not os.path.isdir(current_dir): + return + # If ubuntu folder already exists, then no reason to continue + if 'ubuntu' in list_subdirs(current_dir): + return + + # Starting point for iteration: paths that contain only the + # top-level subdirectory of tftproot, i.e. 
the architecture name. + potential_arches = list_subdirs(current_dir) + paths = [[subdir] for subdir in potential_arches] + + # Extend paths deeper into the filesystem, through the levels that + # represent sub-architecture, release, and label. + # Any directory that doesn't extend this deep isn't a boot image. + for level in ['subarch', 'release', 'label']: + paths = drill_down(current_dir, paths) + paths = filter_out_directories_with_extra_levels(paths) + + # Extract the only top directories (arch) from the paths, as we only need + # its name to move into the new 'ubuntu' folder. + arches = {arch for arch, _, _, _ in paths} + if len(arches) == 0: + return + + # Create the ubuntu directory and move the architecture folders under that + # directory. + ubuntu_dir = os.path.join(current_dir, 'ubuntu') + os.mkdir(ubuntu_dir) + for arch in arches: + shutil.move(os.path.join(current_dir, arch), ubuntu_dir) + + # Re-write the maas.tgt to point to the new location for the ubuntu boot + # resources. + write_targets_conf(current_dir) + update_targets_conf(current_dir) # Upgrade hooks, from oldest to newest. The hooks are callables, taking no @@ -191,7 +215,10 @@ # Each hook figures out for itself whether its changes are needed. There is # no record of previous upgrades. UPGRADE_HOOKS = [ - generate_boot_resources_config, + make_maas_own_boot_resources, + create_gnupg_home, + retire_bootresources_yaml, + migrate_architectures_into_ubuntu_directory, ] @@ -207,4 +234,6 @@ def run(args): """Perform any data migrations needed for upgrading this cluster.""" for hook in UPGRADE_HOOKS: + maaslog.info("Cluster upgrade hook %s started." % hook.__name__) hook() + maaslog.info("Cluster upgrade hook %s finished." 
% hook.__name__) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/curtin.py maas-1.7.6+bzr3376/src/provisioningserver/utils/curtin.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/curtin.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/curtin.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,55 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Utilities related to Curtin.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'compose_mv_command', + 'compose_recursive_copy', + 'compose_write_text_file', + ] + + +def compose_write_text_file(path, content, owner='root:root', + permissions=0600): + """Return preseed for uploading a text file to the install target. + + Use this to write files into the filesystem that Curtin is installing. The + result goes into a `write_files` preseed entry. + """ + return { + 'path': path, + 'content': content, + 'owner': owner, + 'permissions': '0%o' % permissions, + } + + +def compose_mv_command(source, dest): + """Return preseed for running the `mv` command in the install target. + + Use this for moving files around in the filesystem that Curtin is + installing. The result goes in a preseed entry for running commands, such + as an entry in `late_commands` dict. 
+ """ + return [ + 'curtin', 'in-target', '--', + 'mv', '--', source, dest, + ] + + +def compose_recursive_copy(source, dest): + """Return preseed for running a recursive `cp` in the install target.""" + return [ + 'curtin', 'in-target', '--', + 'cp', '-r', '-p', '--', source, dest, + ] diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/enum.py maas-1.7.6+bzr3376/src/provisioningserver/utils/enum.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/enum.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/enum.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,50 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Enum-related utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'map_enum', + 'map_enum_reverse', + ] + + +def map_enum(enum_class): + """Map out an enumeration class as a "NAME: value" dict.""" + # Filter out anything that starts with '_', which covers private and + # special methods. We can make this smarter later if we start using + # a smarter enumeration base class etc. Or if we switch to a proper + # enum mechanism, this function will act as a marker for pieces of + # code that should be updated. + return { + key: value + for key, value in vars(enum_class).items() + if not key.startswith('_') + } + + +def map_enum_reverse(enum_class, ignore=None): + """Map out an enumeration class as a "value: NAME" dict. + + Works like `map_enum`, but reverse its keys and values so that you can + look up text representations from the enum's integer value. + + Any keys in `ignore` are left out of the returned dict. This lets you + remove the `DEFAULT` entry that some enum classes have. 
+ """ + if ignore is None: + ignore = [] + return { + value: key + for key, value in map_enum(enum_class).viewitems() + if key not in ignore + } diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/env.py maas-1.7.6+bzr3376/src/provisioningserver/utils/env.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/env.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/env.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,38 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Environment-related utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'environment_variables', + ] + +from contextlib import contextmanager +import os + + +@contextmanager +def environment_variables(variables): + """Context manager: temporarily set the given environment variables. + + The variables are reset to their original settings afterwards. + + :param variables: A dict mapping environment variables to their temporary + values. + """ + prior_environ = os.environ.copy() + os.environ.update(variables) + try: + yield + finally: + os.environ.clear() + os.environ.update(prior_environ) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/fs.py maas-1.7.6+bzr3376/src/provisioningserver/utils/fs.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/fs.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/fs.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,264 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Generic utilities for dealing with files and the filesystem.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'atomic_symlink', + 'atomic_write', + 'ensure_dir', + 'incremental_write', + 'read_text_file', + 'sudo_write_file', + 'tempdir', + 'write_text_file', + ] + + +import codecs +from contextlib import contextmanager +import errno +import os +from os.path import isdir +from shutil import rmtree +from subprocess import ( + PIPE, + Popen, + ) +import sys +import tempfile +from time import time + +from lockfile import FileLock +from provisioningserver.utils.shell import ExternalProcessError + + +def _write_temp_file(content, filename): + """Write the given `content` in a temporary file next to `filename`.""" + # Write the file to a temporary place (next to the target destination, + # to ensure that it is on the same filesystem). + directory = os.path.dirname(filename) + prefix = ".%s." % os.path.basename(filename) + suffix = ".tmp" + try: + temp_fd, temp_file = tempfile.mkstemp( + dir=directory, suffix=suffix, prefix=prefix) + except OSError, error: + if error.filename is None: + error.filename = os.path.join( + directory, prefix + "XXXXXX" + suffix) + raise + else: + with os.fdopen(temp_fd, "wb") as f: + f.write(content) + # Finish writing this file to the filesystem, and then, tell the + # filesystem to push it down onto persistent storage. This + # prevents a nasty hazard in aggressively optimized filesystems + # where you replace an old but consistent file with a new one that + # is still in cache, and lose power before the new file can be made + # fully persistent. + # This was a particular problem with ext4 at one point; it may + # still be. + f.flush() + os.fsync(f) + return temp_file + + +def atomic_write(content, filename, overwrite=True, mode=0600): + """Write `content` into the file `filename` in an atomic fashion. 
+ + This requires write permissions to the directory that `filename` is in. + It creates a temporary file in the same directory (so that it will be + on the same filesystem as the destination) and then renames it to + replace the original, if any. Such a rename is atomic in POSIX. + + :param overwrite: Overwrite `filename` if it already exists? Default + is True. + :param mode: Access permissions for the file, if written. + """ + temp_file = _write_temp_file(content, filename) + os.chmod(temp_file, mode) + try: + if overwrite: + os.rename(temp_file, filename) + else: + lock = FileLock(filename) + lock.acquire() + try: + if not os.path.isfile(filename): + os.rename(temp_file, filename) + finally: + lock.release() + finally: + if os.path.isfile(temp_file): + os.remove(temp_file) + + +def atomic_symlink(source, name): + """Create a symbolic link pointing to `source` named `name`. + + This method is meant to be a drop-in replacement of os.symlink. + + The symlink creation will be atomic. If a file/symlink named + `name` already exists, it will be overwritten. + """ + temp_file = '%s.new' % name + try: + if os.path.exists(temp_file): + os.remove(temp_file) + os.symlink(source, temp_file) + os.rename(temp_file, name) + finally: + if os.path.isfile(temp_file): + os.remove(temp_file) + + +def pick_new_mtime(old_mtime=None, starting_age=1000): + """Choose a new modification time for a file that needs it updated. + + This function is used to manage the modification time of files + for which we need to see an increment in the modification time + each time the file is modified. This is the case for DNS zone + files which only get properly reloaded if BIND sees that the + modification time is > to the time it has in its database. + + Modification time can have a resolution as low as one second in + some relevant environments (we have observed this with ext3). 
+ To produce mtime changes regardless, we set a file's modification + time in the past when it is first written, and + increment it by 1 second on each subsequent write. + + However we also want to be careful not to set the modification time + in the future, mostly because BIND does not deal with that very + well. + + :param old_mtime: File's previous modification time, as a number + with a unit of one second, or None if it did not previously + exist. + :param starting_age: If the file did not exist previously, set its + modification time this many seconds in the past. + """ + now = time() + if old_mtime is None: + # File is new. Set modification time in the past to have room for + # sub-second modifications. + return now - starting_age + elif old_mtime + 1 <= now: + # There is room to increment the file's mtime by one second + # without ending up in the future. + return old_mtime + 1 + else: + # We can't increase the file's modification time. Give up and + # return the previous modification time. + return old_mtime + + +def incremental_write(content, filename, mode=0600): + """Write the given `content` into the file `filename` and + increment the modification time by 1 sec. + + :param mode: Access permissions for the file. + """ + old_mtime = get_mtime(filename) + atomic_write(content, filename, mode=mode) + new_mtime = pick_new_mtime(old_mtime) + os.utime(filename, (new_mtime, new_mtime)) + + +def get_mtime(filename): + """Return a file's modification time, or None if it does not exist.""" + try: + return os.stat(filename).st_mtime + except OSError as e: + if e.errno == errno.ENOENT: + # File does not exist. Be helpful, return None. + return None + else: + # Other failure. The caller will want to know. + raise + + +def sudo_write_file(filename, contents, encoding='utf-8', mode=0644): + """Write (or overwrite) file as root. USE WITH EXTREME CARE. + + Runs an atomic update using non-interactive `sudo`. This will fail if + it needs to prompt for a password. 
+ """ + raw_contents = contents.encode(encoding) + command = [ + 'sudo', '-n', 'maas-provision', 'atomic-write', + '--filename', filename, + '--mode', oct(mode), + ] + proc = Popen(command, stdin=PIPE) + stdout, stderr = proc.communicate(raw_contents) + if proc.returncode != 0: + raise ExternalProcessError(proc.returncode, command, stderr) + + +def ensure_dir(path): + """Do the equivalent of `mkdir -p`, creating `path` if it didn't exist.""" + try: + os.makedirs(path) + except OSError as e: + if e.errno != errno.EEXIST: + raise + if not isdir(path): + # Path exists, but isn't a directory. + raise + # Otherwise, the error is that the directory already existed. + # Which is actually success. + + +@contextmanager +def tempdir(suffix=b'', prefix=b'maas-', location=None): + """Context manager: temporary directory. + + Creates a temporary directory (yielding its path, as `unicode`), and + cleans it up again when exiting the context. + + The directory will be readable, writable, and searchable only to the + system user who creates it. + + >>> with tempdir() as playground: + ... my_file = os.path.join(playground, "my-file") + ... with open(my_file, 'wb') as handle: + ... handle.write(b"Hello.\n") + ... files = os.listdir(playground) + >>> files + [u'my-file'] + >>> os.path.isdir(playground) + False + """ + path = tempfile.mkdtemp(suffix, prefix, location) + if isinstance(path, bytes): + path = path.decode(sys.getfilesystemencoding()) + assert isinstance(path, unicode) + try: + yield path + finally: + rmtree(path, ignore_errors=True) + + +def read_text_file(path, encoding='utf-8'): + """Read and decode the text file at the given path.""" + with codecs.open(path, encoding=encoding) as infile: + return infile.read() + + +def write_text_file(path, text, encoding='utf-8'): + """Write the given unicode text to the given file path. + + If the file existed, it will be overwritten. 
+ """ + with codecs.open(path, 'w', encoding) as outfile: + outfile.write(text) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/__init__.py maas-1.7.6+bzr3376/src/provisioningserver/utils/__init__.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/__init__.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/__init__.py 2015-07-10 01:27:14.000000000 +0000 @@ -13,127 +13,123 @@ __metaclass__ = type __all__ = [ - "ActionScript", - "atomic_write", - "deferred", + "create_node", "filter_dict", - "find_ip_via_arp", + "flatten", + "get_cluster_config", "import_settings", - "incremental_write", "locate_config", - "MainScript", - "ensure_dir", "parse_key_value_file", - "read_text_file", "ShellTemplate", - "sudo_write_file", + "warn_deprecated", "write_custom_config_section", - "write_text_file", ] -from argparse import ArgumentParser -import codecs -from contextlib import contextmanager -import errno -from functools import wraps -import logging +from collections import Iterable +from itertools import ( + chain, + imap, + ) import os -from os import fdopen -from os.path import isdir from pipes import quote -from shutil import rmtree -import signal -import string -import subprocess -from subprocess import ( - CalledProcessError, - PIPE, - Popen, - ) +import re import sys -import tempfile -from time import time +from sys import _getframe as getframe +from warnings import warn -from crochet import run_in_reactor -from lockfile import FileLock -from lxml import etree -import netifaces +import bson +from provisioningserver.cluster_config import get_cluster_uuid +from provisioningserver.logger.log import get_maas_logger +from provisioningserver.rpc import getRegionClient +from provisioningserver.rpc.exceptions import ( + NoConnectionsAvailable, + NodeAlreadyExists, + ) +from provisioningserver.utils.twisted import ( + pause, + retries, + ) +import simplejson as json import tempita -from twisted.internet.defer import 
maybeDeferred -from twisted.python.threadable import isInIOThread - -# A table suitable for use with str.translate() to replace each -# non-printable and non-ASCII character in a byte string with a question -# mark, mimicking the "replace" strategy when encoding and decoding. -non_printable_replace_table = b"".join( - chr(i) if chr(i) in string.printable else b"?" - for i in xrange(0xff + 0x01)) - - -class ExternalProcessError(CalledProcessError): - """Raised when there's a problem calling an external command. - - Unlike `CalledProcessError`: - - - `__str__()` returns a string containing the output of the failed - external process, if available. All non-printable and non-ASCII - characters are filtered out, replaced by question marks. - - - `__unicode__()` is defined, and tries to return something - analagous to `__str__()` but keeping in valid unicode characters - from the error message. - - """ - - @staticmethod - def _to_unicode(string): - if isinstance(string, bytes): - return string.decode("ascii", "replace") - else: - return unicode(string) - - @staticmethod - def _to_ascii(string, table=non_printable_replace_table): - if isinstance(string, unicode): - return string.encode("ascii", "replace") - else: - return bytes(string).translate(table) - - def __unicode__(self): - cmd = u" ".join(quote(self._to_unicode(part)) for part in self.cmd) - output = self._to_unicode(self.output) - return u"Command `%s` returned non-zero exit status %d:\n%s" % ( - cmd, self.returncode, output) - - def __str__(self): - cmd = b" ".join(quote(self._to_ascii(part)) for part in self.cmd) - output = self._to_ascii(self.output) - return b"Command `%s` returned non-zero exit status %d:\n%s" % ( - cmd, self.returncode, output) - - -def call_and_check(command, *args, **kwargs): - """A wrapper around subprocess.check_call(). 
+from twisted.internet import reactor +from twisted.internet.defer import ( + inlineCallbacks, + returnValue, + ) +from twisted.protocols.amp import UnhandledCommand - When an error occurs, raise an ExternalProcessError. - """ - try: - return subprocess.check_call(command, *args, **kwargs) - except subprocess.CalledProcessError as error: - error.__class__ = ExternalProcessError - raise +maaslog = get_maas_logger("utils") -def call_capture_and_check(command, *args, **kwargs): - """A wrapper around subprocess.check_output(). - When an error occurs, raise an ExternalProcessError. - """ +def node_exists(macs, url, client): + decoders = { + "application/json": lambda data: json.loads(data), + "application/bson": lambda data: bson.BSON(data).decode(), + } + params = { + 'mac_address': macs + } + response = client.get(url, + op='list', + **params) + content = response.read() + content_type = response.headers.gettype() + decode = decoders[content_type] + content = decode(content) + return len(content) > 0 + + +@inlineCallbacks +def create_node(macs, arch, power_type, power_parameters): + """Create a Node on the region and return its system_id. + + :param macs: A list of MAC addresses belonging to the node. + :param arch: The node's architecture, in the form 'arch/subarch'. + :param power_type: The node's power type as a string. + :param power_parameters: The power parameters for the node, as a + dict. + """ + # Avoid circular dependencies. + from provisioningserver.rpc.region import CreateNode + for elapsed, remaining, wait in retries(15, 5, reactor): + try: + client = getRegionClient() + break + except NoConnectionsAvailable: + yield pause(wait, reactor) + else: + maaslog.error( + "Can't create node, no RPC connection to region.") + return + + # De-dupe the MAC addresses we pass. We sort here to avoid test + # failures. 
+    macs = sorted(set(macs))
     try:
-        return subprocess.check_output(command, *args, **kwargs)
-    except subprocess.CalledProcessError as error:
-        error.__class__ = ExternalProcessError
-        raise
+        response = yield client(
+            CreateNode,
+            cluster_uuid=get_cluster_uuid(),
+            architecture=arch,
+            power_type=power_type,
+            power_parameters=json.dumps(power_parameters),
+            mac_addresses=macs)
+    except NodeAlreadyExists:
+        # The node already exists on the region, so we log the error and
+        # give up.
+        maaslog.error(
+            "A node with one of the mac addresses in %s already exists.",
+            macs)
+        returnValue(None)
+    except UnhandledCommand:
+        # The region hasn't been upgraded to support this method
+        # yet, so give up.
+        maaslog.error(
+            "Unable to create node on region: Region does not "
+            "support the CreateNode RPC method.")
+        returnValue(None)
+    else:
+        returnValue(response['system_id'])
 
 
 def locate_config(*path):
@@ -159,6 +155,24 @@
     return os.path.abspath(os.path.join(config_dir, *path))
 
 
+setting_expression = r"""
+    ^([A-Z0-9_]+)    # Variable name is all caps, alphanumeric and _.
+    =                # Assignment operator.
+    (?:"|\')?        # Optional leading single or double quote.
+    (.*)             # Value
+    (?:"|\')?        # Optional trailing single or double quote.
+    """
+
+
+def get_cluster_config(config_path):
+    contents = open(config_path).read()
+
+    results = re.findall(
+        setting_expression, contents, re.MULTILINE | re.VERBOSE)
+
+    return dict(results)
+
+
 def find_settings(whence):
     """Return settings from `whence`, which is assumed to be a module."""
     # XXX 2012-10-11 JeroenVermeulen, bug=1065456: Put this in a shared
@@ -179,74 +193,6 @@
     target.update(source)
 
 
-def deferred(func):
-    """Decorates a function to ensure that it always returns a `Deferred`.
-
-    This also serves a secondary documentation purpose; functions decorated
-    with this are readily identifiable as asynchronous.
- """ - @wraps(func) - def wrapper(*args, **kwargs): - return maybeDeferred(func, *args, **kwargs) - return wrapper - - -def asynchronous(func): - """Decorates a function to ensure that it always runs in the reactor. - - If the wrapper is called from the reactor thread, it will call - straight through to the wrapped function. It will not be wrapped by - `maybeDeferred` for example. - - If the wrapper is called from another thread, it will return a - :class:`crochet.EventualResult`, as if it had been decorated with - `crochet.run_in_reactor`. - - This also serves a secondary documentation purpose; functions decorated - with this are readily identifiable as asynchronous. - """ - func_in_reactor = run_in_reactor(func) - - @wraps(func) - def wrapper(*args, **kwargs): - if isInIOThread(): - return func(*args, **kwargs) - else: - return func_in_reactor(*args, **kwargs) - return wrapper - - -def synchronous(func): - """Decorator to ensure that `func` never runs in the reactor thread. - - If the wrapped function is called from the reactor thread, this will - raise a :class:`AssertionError`, implying that this is a programming - error. Calls from outside the reactor will proceed unaffected. - - There is an asymettry with the `asynchronous` decorator. The reason - is that it is essential to be aware when `deferToThread()` is being - used, so that in-reactor code knows to synchronise with it, to add a - callback to the :class:`Deferred` that it returns, for example. The - expectation with `asynchronous` is that the return value is always - important, and will be appropriate to the environment in which it is - utilised. - - This also serves a secondary documentation purpose; functions decorated - with this are readily identifiable as synchronous, or blocking. - - :raises AssertionError: When called inside the reactor thread. - """ - @wraps(func) - def wrapper(*args, **kwargs): - if isInIOThread(): - raise AssertionError( - "Function %s(...) 
must not be called in the " - "reactor thread." % func.__name__) - else: - return func(*args, **kwargs) - return wrapper - - def filter_dict(dictionary, desired_keys): """Return a version of `dictionary` restricted to `desired_keys`. @@ -260,130 +206,11 @@ } -def _write_temp_file(content, filename): - """Write the given `content` in a temporary file next to `filename`.""" - # Write the file to a temporary place (next to the target destination, - # to ensure that it is on the same filesystem). - directory = os.path.dirname(filename) - prefix = ".%s." % os.path.basename(filename) - suffix = ".tmp" - try: - temp_fd, temp_file = tempfile.mkstemp( - dir=directory, suffix=suffix, prefix=prefix) - except OSError, error: - if error.filename is None: - error.filename = os.path.join( - directory, prefix + "XXXXXX" + suffix) - raise - else: - with os.fdopen(temp_fd, "wb") as f: - f.write(content) - # Finish writing this file to the filesystem, and then, tell the - # filesystem to push it down onto persistent storage. This - # prevents a nasty hazard in aggressively optimized filesystems - # where you replace an old but consistent file with a new one that - # is still in cache, and lose power before the new file can be made - # fully persistent. - # This was a particular problem with ext4 at one point; it may - # still be. - f.flush() - os.fsync(f) - return temp_file - - -def atomic_write(content, filename, overwrite=True, mode=0600): - """Write `content` into the file `filename` in an atomic fashion. - - This requires write permissions to the directory that `filename` is in. - It creates a temporary file in the same directory (so that it will be - on the same filesystem as the destination) and then renames it to - replace the original, if any. Such a rename is atomic in POSIX. - - :param overwrite: Overwrite `filename` if it already exists? Default - is True. - :param mode: Access permissions for the file, if written. 
- """ - temp_file = _write_temp_file(content, filename) - os.chmod(temp_file, mode) - try: - if overwrite: - os.rename(temp_file, filename) - else: - lock = FileLock(filename) - lock.acquire() - try: - if not os.path.isfile(filename): - os.rename(temp_file, filename) - finally: - lock.release() - finally: - if os.path.isfile(temp_file): - os.remove(temp_file) - - -def incremental_write(content, filename, mode=0600): - """Write the given `content` into the file `filename` and - increment the modification time by 1 sec. - - :param mode: Access permissions for the file. - """ - old_mtime = get_mtime(filename) - atomic_write(content, filename, mode=mode) - new_mtime = pick_new_mtime(old_mtime) - os.utime(filename, (new_mtime, new_mtime)) - - -def get_mtime(filename): - """Return a file's modification time, or None if it does not exist.""" - try: - return os.stat(filename).st_mtime - except OSError as e: - if e.errno == errno.ENOENT: - # File does not exist. Be helpful, return None. - return None - else: - # Other failure. The caller will want to know. - raise - - -def pick_new_mtime(old_mtime=None, starting_age=1000): - """Choose a new modification time for a file that needs it updated. - - This function is used to manage the modification time of files - for which we need to see an increment in the modification time - each time the file is modified. This is the case for DNS zone - files which only get properly reloaded if BIND sees that the - modification time is > to the time it has in its database. - - Modification time can have a resolution as low as one second in - some relevant environments (we have observed this with ext3). - To produce mtime changes regardless, we set a file's modification - time in the past when it is first written, and - increment it by 1 second on each subsequent write. - - However we also want to be careful not to set the modification time - in the future, mostly because BIND does not deal with that very - well. 
- - :param old_mtime: File's previous modification time, as a number - with a unity of one second, or None if it did not previously - exist. - :param starting_age: If the file did not exist previously, set its - modification time this many seconds in the past. - """ - now = time() - if old_mtime is None: - # File is new. Set modification time in the past to have room for - # sub-second modifications. - return now - starting_age - elif old_mtime + 1 <= now: - # There is room to increment the file's mtime by one second - # without ending up in the future. - return old_mtime + 1 - else: - # We can't increase the file's modification time. Give up and - # return the previous modification time. - return old_mtime +def dict_depth(d, depth=0): + """Returns the max depth of a dictionary.""" + if not isinstance(d, dict) or not d: + return depth + return max(dict_depth(v, depth + 1) for _, v in d.iteritems()) def split_lines(input, separator): @@ -481,24 +308,6 @@ return '\n'.join(lines) + '\n' -def sudo_write_file(filename, contents, encoding='utf-8', mode=0644): - """Write (or overwrite) file as root. USE WITH EXTREME CARE. - - Runs an atomic update using non-interactive `sudo`. This will fail if - it needs to prompt for a password. - """ - raw_contents = contents.encode(encoding) - command = [ - 'sudo', '-n', 'maas-provision', 'atomic-write', - '--filename', filename, - '--mode', oct(mode), - ] - proc = Popen(command, stdin=PIPE) - stdout, stderr = proc.communicate(raw_contents) - if proc.returncode != 0: - raise ExternalProcessError(proc.returncode, command, stderr) - - class Safe: """An object that is safe to render as-is.""" @@ -544,250 +353,6 @@ return quote(rep(value, pos)) -class ActionScript: - """A command-line script that follows a command+verb pattern.""" - - def __init__(self, description): - super(ActionScript, self).__init__() - # See http://docs.python.org/release/2.7/library/argparse.html. 
- self.parser = ArgumentParser(description=description) - self.subparsers = self.parser.add_subparsers(title="actions") - - @staticmethod - def setup(): - # Ensure stdout and stderr are line-bufferred. - sys.stdout = fdopen(sys.stdout.fileno(), "ab", 1) - sys.stderr = fdopen(sys.stderr.fileno(), "ab", 1) - # Run the SIGINT handler on SIGTERM; `svc -d` sends SIGTERM. - signal.signal(signal.SIGTERM, signal.default_int_handler) - - def register(self, name, handler, *args, **kwargs): - """Register an action for the given name. - - :param name: The name of the action. - :param handler: An object, a module for example, that has `run` and - `add_arguments` callables. The docstring of the `run` callable is - used as the help text for the newly registered action. - :param args: Additional positional arguments for the subparser_. - :param kwargs: Additional named arguments for the subparser_. - - .. _subparser: - http://docs.python.org/ - release/2.7/library/argparse.html#sub-commands - """ - parser = self.subparsers.add_parser( - name, *args, help=handler.run.__doc__, **kwargs) - parser.set_defaults(handler=handler) - handler.add_arguments(parser) - return parser - - def execute(self, argv=None): - """Execute this action. - - This is intended for in-process invocation of an action, though it may - still raise L{SystemExit}. The L{__call__} method is intended for when - this object is executed as a script proper. - """ - args = self.parser.parse_args(argv) - args.handler.run(args) - - def __call__(self, argv=None): - try: - self.setup() - self.execute(argv) - except CalledProcessError as error: - # Print error.cmd and error.output too? - raise SystemExit(error.returncode) - except KeyboardInterrupt: - raise SystemExit(1) - else: - raise SystemExit(0) - - -class MainScript(ActionScript): - """An `ActionScript` that always accepts a `--config-file` option. 
- - The `--config-file` option defaults to the value of - `MAAS_PROVISIONING_SETTINGS` in the process's environment, or absent - that, `$MAAS_CONFIG_DIR/pserv.yaml` (normally /etc/maas/pserv.yaml for - packaged installations, or when running from branch, the equivalent - inside that branch). - """ - - def __init__(self, description): - # Avoid circular imports. - from provisioningserver.config import Config - - super(MainScript, self).__init__(description) - self.parser.add_argument( - "-c", "--config-file", metavar="FILENAME", - help="Configuration file to load [%(default)s].", - default=Config.DEFAULT_FILENAME) - - -class AtomicWriteScript: - """Wrap the atomic_write function turning it into an ActionScript. - - To use: - >>> main = MainScript(atomic_write.__doc__) - >>> main.register("myscriptname", AtomicWriteScript) - >>> main() - """ - - @staticmethod - def add_arguments(parser): - """Initialise options for writing files atomically. - - :param parser: An instance of :class:`ArgumentParser`. - """ - parser.add_argument( - "--no-overwrite", action="store_true", required=False, - default=False, help="Don't overwrite file if it exists") - parser.add_argument( - "--filename", action="store", required=True, help=( - "The name of the file in which to store contents of stdin")) - parser.add_argument( - "--mode", action="store", required=False, default=None, help=( - "They permissions to set on the file. 
If not set " - "will be r/w only to owner")) - - @staticmethod - def run(args): - """Take content from stdin and write it atomically to a file.""" - content = sys.stdin.read() - if args.mode is not None: - mode = int(args.mode, 8) - else: - mode = 0600 - atomic_write( - content, args.filename, overwrite=not args.no_overwrite, - mode=mode) - - -def get_all_interface_addresses(): - """For each network interface, yield its IPv4 address.""" - for interface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(interface) - if netifaces.AF_INET in addresses: - for inet_address in addresses[netifaces.AF_INET]: - if "addr" in inet_address: - yield inet_address["addr"] - - -def ensure_dir(path): - """Do the equivalent of `mkdir -p`, creating `path` if it didn't exist.""" - try: - os.makedirs(path) - except OSError as e: - if e.errno != errno.EEXIST: - raise - if not isdir(path): - # Path exists, but isn't a directory. - raise - # Otherwise, the error is that the directory already existed. - # Which is actually success. - - -@contextmanager -def tempdir(suffix=b'', prefix=b'maas-', location=None): - """Context manager: temporary directory. - - Creates a temporary directory (yielding its path, as `unicode`), and - cleans it up again when exiting the context. - - The directory will be readable, writable, and searchable only to the - system user who creates it. - - >>> with tempdir() as playground: - ... my_file = os.path.join(playground, "my-file") - ... with open(my_file, 'wb') as handle: - ... handle.write(b"Hello.\n") - ... 
files = os.listdir(playground) - >>> files - [u'my-file'] - >>> os.path.isdir(playground) - False - """ - path = tempfile.mkdtemp(suffix, prefix, location) - if isinstance(path, bytes): - path = path.decode(sys.getfilesystemencoding()) - assert isinstance(path, unicode) - try: - yield path - finally: - rmtree(path, ignore_errors=True) - - -def read_text_file(path, encoding='utf-8'): - """Read and decode the text file at the given path.""" - with codecs.open(path, encoding=encoding) as infile: - return infile.read() - - -def write_text_file(path, text, encoding='utf-8'): - """Write the given unicode text to the given file path. - - If the file existed, it will be overwritten. - """ - with codecs.open(path, 'w', encoding) as outfile: - outfile.write(text) - - -def is_compiled_xpath(xpath): - """Is `xpath` a compiled expression?""" - return isinstance(xpath, etree.XPath) - - -def is_compiled_doc(doc): - """Is `doc` a compiled XPath document evaluator?""" - return isinstance(doc, etree.XPathDocumentEvaluator) - - -def match_xpath(xpath, doc): - """Return a match of expression `xpath` against document `doc`. - - :type xpath: Either `unicode` or `etree.XPath` - :type doc: Either `etree._ElementTree` or `etree.XPathDocumentEvaluator` - - :rtype: bool - """ - is_xpath_compiled = is_compiled_xpath(xpath) - is_doc_compiled = is_compiled_doc(doc) - - if is_xpath_compiled and is_doc_compiled: - return doc(xpath.path) - elif is_xpath_compiled: - return xpath(doc) - elif is_doc_compiled: - return doc(xpath) - else: - return doc.xpath(xpath) - - -def try_match_xpath(xpath, doc, logger=logging): - """See if the XPath expression matches the given XML document. - - Invalid XPath expressions are logged, and are returned as a - non-match. 
- - :type xpath: Either `unicode` or `etree.XPath` - :type doc: Either `etree._ElementTree` or `etree.XPathDocumentEvaluator` - - :rtype: bool - """ - try: - # Evaluating an XPath expression against a document with LXML - # can return a list or a string, and perhaps other types. - # Casting the return value into a boolean context appears to - # be the most reliable way of detecting a match. - return bool(match_xpath(xpath, doc)) - except etree.XPathEvalError: - # Get a plaintext version of `xpath`. - expr = xpath.path if is_compiled_xpath(xpath) else xpath - logger.exception("Invalid expression: %s", expr) - return False - - def classify(func, subjects): """Classify `subjects` according to `func`. @@ -810,43 +375,43 @@ return matched, other -def find_ip_via_arp(mac): - """Find the IP address for `mac` by reading the output of arp -n. +def warn_deprecated(alternative=None): + """Issue a `DeprecationWarning` for the calling function. - Returns `None` if the MAC is not found. - - We do this because we aren't necessarily the only DHCP server on the - network, so we can't check our own leases file and be guaranteed to find an - IP that matches. - - :param mac: The mac address, e.g. '1c:6f:65:d5:56:98'. + :param alternative: Text describing an alternative to using this + deprecated function. """ - - output = call_capture_and_check(['arp', '-n']).split('\n') - - for line in output: - columns = line.split() - if len(columns) == 5 and columns[2] == mac: - return columns[0] - return None + target = getframe(1).f_code.co_name + message = "%s is deprecated" % target + if alternative is None: + message = "%s." % (message,) + else: + message = "%s; %s" % (message, alternative) + warn(message, DeprecationWarning, 1) -def find_mac_via_arp(ip): - """Find the MAC address for `ip` by reading the output of arp -n. +def flatten(*things): + """Recursively flatten iterable parts of `things`. - Returns `None` if the IP is not found. 
+ For example:: - We do this because we aren't necessarily the only DHCP server on the - network, so we can't check our own leases file and be guaranteed to find an - IP that matches. + >>> sorted(flatten([1, 2, {3, 4, (5, 6)}])) + [1, 2, 3, 4, 5, 6] - :param ip: The ip address, e.g. '192.168.1.1'. + :return: An iterator. """ + def _flatten(things): + if isinstance(things, basestring): + # String-like objects are treated as leaves; iterating through a + # string yields more strings, each of which is also iterable, and + # so on, until the heat-death of the universe. + return iter((things,)) + elif isinstance(things, Iterable): + # Recurse and merge in order to flatten nested structures. + return chain.from_iterable(imap(_flatten, things)) + else: + # This is a leaf; return an single-item iterator so that it can be + # chained with any others. + return iter((things,)) - output = call_capture_and_check(['arp', '-n']).split('\n') - - for line in sorted(output): - columns = line.split() - if len(columns) == 5 and columns[0] == ip: - return columns[2] - return None + return _flatten(things) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/isc.py maas-1.7.6+bzr3376/src/provisioningserver/utils/isc.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/isc.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/isc.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,306 @@ +# Copyright (c) 2009, Purdue University +# Copyright (c) 2015, Canonical Ltd. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. 
+# +# Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# Neither the name of the Purdue University nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, +) +from collections import OrderedDict + + +str = None + +__metaclass__ = type +__all__ = [ + 'ISCParseException', + 'make_isc_string', + 'parse_isc_string', + 'read_isc_file', +] + +import copy + + +class ISCParseException(Exception): + """Thrown when an ISC string cannot be parsed.""" + + +def _clip(char_list): + """Clips char_list to individual stanza. 
+ + Inputs: + char_list: partial of char_list from _parse_tokens + + Outputs: + tuple: (int: skip to char list index, list: shortened char_list) + """ + assert char_list[0] == '{' + char_list.pop(0) + skip = 0 + for index, item in enumerate(char_list): + if item == '{': + skip += 1 + elif item == '}' and skip == 0: + return index, char_list[:index] + elif item == '}': + skip -= 1 + raise ISCParseException("Invalid brackets.") + + +def _parse_tokens(char_list): + """Parses exploded isc named.conf portions. + + Inputs: + char_list: List of isc file parts + + Outputs: + dict: fragment or full isc file dict + Recursive dictionary of isc file, dict values can be of 3 types, + dict, string and bool. Boolean values are always true. Booleans are false + if key is absent. Booleans represent situations in isc files such as: + acl "registered" { 10.1.0/32; 10.1.1:/32;}} + + Example: + + {'stanza1 "new"': 'test_info', 'stanza1 "embedded"': {'acl "registered"': + {'10.1.0/32': True, '10.1.1/32': True}}} + """ + index = 0 + dictionary_fragment = OrderedDict() + new_char_list = copy.deepcopy(char_list) + if type(new_char_list) == str: + return new_char_list + if type(new_char_list) == OrderedDict: + return new_char_list + last_open = None + continuous_line = False + temp_list = [] + + # Prevent "may be referenced before assignment" error + key = None + + while index < len(new_char_list): + if new_char_list[index] == '{': + last_open = index + if new_char_list[index] == ';' and continuous_line: + dictionary_fragment = temp_list + temp_list = [] + continuous_line = False + if new_char_list[index] == ';': + continuous_line = False + if (len(new_char_list) > index + 1 and + new_char_list[index] == '}' and + new_char_list[index + 1] != ';'): + skip, value = _clip(new_char_list[last_open:]) + temp_list.append({key: copy.deepcopy(_parse_tokens(value))}) + continuous_line = True + if len(new_char_list) > index + 1 and new_char_list[index + 1] == '{': + # assert key is not None + key = 
new_char_list.pop(index) + skip, dict_value = _clip(new_char_list[index:]) + if continuous_line: + temp_list.append( + {key: copy.deepcopy(_parse_tokens(dict_value))}) + else: + dictionary_fragment[key] = copy.deepcopy( + _parse_tokens(dict_value)) + index += skip + else: + if len(new_char_list[ + index].split()) == 1 and '{' not in new_char_list: + for item in new_char_list: + if item in [';']: + continue + dictionary_fragment[item] = True + + # If there are more than 1 'keywords' at new_char_list[index] + # ex - "recursion no;" + elif len(new_char_list[index].split()) >= 2: + if type(dictionary_fragment) == list: + raise ISCParseException("Syntax error") + dictionary_fragment[ + new_char_list[index].split()[0]] = ( + ' '.join(new_char_list[index].split()[1:])) + index += 1 + + # If there is just 1 'keyword' at new_char_list[index] + # ex "recursion;" (not a valid option, but for example's sake it's + # fine) + elif new_char_list[index] not in ['{', ';', '}']: + key = new_char_list[index] + if type(dictionary_fragment) == list: + raise ISCParseException("Syntax error") + dictionary_fragment[key] = '' + index += 1 + index += 1 + + return dictionary_fragment + + +def _scrub_comments(isc_string): + """Clears comments from an isc file + + Inputs: + isc_string: string of isc file + Outputs: + string: string of scrubbed isc file + """ + isc_list = [] + if isc_string is None: + return '' + expanded_comment = False + for line in isc_string.split('\n'): + no_comment_line = "" + # Vet out any inline comments + if '/*' in line.strip(): + try: + striped_line = line.strip() + chars = enumerate(striped_line) + while True: + i, c = chars.next() + try: + if c == '/' and striped_line[i + 1] == '*': + expanded_comment = True + chars.next() # Skip '*' + continue + elif c == '*' and striped_line[i + 1] == '/': + expanded_comment = False + chars.next() # Skip '/' + continue + except IndexError: + continue # We are at the end of the line + if expanded_comment: + continue + else: + 
no_comment_line += c + except StopIteration: + if no_comment_line: + isc_list.append(no_comment_line) + continue + + if expanded_comment: + if '*/' in line.strip(): + expanded_comment = False + isc_list.append(line.split('*/')[-1]) + continue + else: + continue + if line.strip().startswith(('#', '//')): + continue + else: + isc_list.append(line.split('#')[0].split('//')[0].strip()) + return '\n'.join(isc_list) + + +def _explode(isc_string): + """Explodes isc file into relevant tokens. + + Inputs: + isc_string: String of isc file + + Outputs: + list: list of isc file tokens delimited by brackets and semicolons + ['stanza1 "new"', '{', 'test_info', ';', '}'] + """ + str_array = [] + temp_string = [] + for char in isc_string: + if char in ['\n']: + continue + if char in ['{', '}', ';']: + if ''.join(temp_string).strip() == '': + str_array.append(char) + else: + str_array.append(''.join(temp_string).strip()) + str_array.append(char) + temp_string = [] + else: + temp_string.append(char) + return str_array + + +def parse_isc_string(isc_string): + """Makes a dictionary from an ISC file string + + Inputs: + isc_string: string of isc file + + Outputs: + dict: dictionary of ISC file representation + """ + return _parse_tokens(_explode(_scrub_comments(isc_string))) + + +def make_isc_string(isc_dict, terminate=True): + """Outputs an isc formatted file string from a dict + + Inputs: + isc_dict: a recursive dictionary to be turned into an isc file + (from ParseTokens) + + Outputs: + str: string of isc file without indentation + """ + if terminate: + terminator = ';' + else: + terminator = '' + if type(isc_dict) == str: + return isc_dict + isc_list = [] + for option in isc_dict: + if type(isc_dict[option]) == bool: + isc_list.append('%s%s' % (option, terminator)) + elif (type(isc_dict[option]) == str or + type(isc_dict[option]) == unicode): + isc_list.append('%s %s%s' % (option, isc_dict[option], terminator)) + elif type(isc_dict[option]) == list: + new_list = [] + for item in 
isc_dict[option]: + new_list.append(make_isc_string(item, terminate=False)) + new_list[-1] = '%s%s' % (new_list[-1], terminator) + isc_list.append( + '%s { %s }%s' % (option, ' '.join(new_list), terminator)) + elif (type(isc_dict[option]) == OrderedDict or + type(isc_dict[option]) == dict): + isc_list.append('%s { %s }%s' % ( + option, make_isc_string(isc_dict[option]), terminator)) + return '\n'.join(isc_list) + + +def read_isc_file(isc_file): + """Given the specified filename, parses it to create a dictionary. + + :param:isc_file: the filename to read + :return:dict: dictionary of ISC file representation + """ + with open(isc_file, "r") as f: + return parse_isc_string(f.read()) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/network.py maas-1.7.6+bzr3376/src/provisioningserver/utils/network.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/network.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/network.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,225 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Generic helpers for `netaddr` and network-related types.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'clean_up_netifaces_address', + 'find_ip_via_arp', + 'find_mac_via_arp', + 'get_all_addresses_for_interface', + 'get_all_interface_addresses', + 'make_network', + 'resolve_hostname', + 'intersect_iprange', + 'ip_range_within_network', + ] + + +from socket import ( + AF_INET, + AF_INET6, + EAI_NODATA, + EAI_NONAME, + gaierror, + getaddrinfo, + ) + +from netaddr import ( + IPAddress, + IPNetwork, + IPRange, + ) +import netifaces +from provisioningserver.utils.shell import call_and_check + + +def make_network(ip_address, netmask_or_bits, **kwargs): + """Construct an `IPNetwork` with the given address and netmask or width. + + This is a thin wrapper for the `IPNetwork` constructor. It's here because + the constructor for `IPNetwork` is easy to get wrong. If you pass it an + IP address and a netmask, or an IP address and a bit size, it will seem to + work... but it will pick a default netmask, not the one you specified. + + :param ip_address: + :param netmask_or_bits: + :param kwargs: Any other (keyword) arguments you want to pass to the + `IPNetwork` constructor. + :raise netaddr.core.AddrFormatError: If the network specification is + malformed. + :return: An `IPNetwork` of the given base address and netmask or bit width. + """ + return IPNetwork("%s/%s" % (ip_address, netmask_or_bits), **kwargs) + + +def find_ip_via_arp(mac): + """Find the IP address for `mac` by reading the output of arp -n. + + Returns `None` if the MAC is not found. + + We do this because we aren't necessarily the only DHCP server on the + network, so we can't check our own leases file and be guaranteed to find an + IP that matches. + + :param mac: The mac address, e.g. '1c:6f:65:d5:56:98'. 
+ """ + + output = call_and_check(['arp', '-n']).split('\n') + + for line in sorted(output): + columns = line.split() + if len(columns) == 5 and columns[2].lower() == mac.lower(): + return columns[0] + return None + + +def find_mac_via_arp(ip): + """Find the MAC address for `ip` by reading the output of arp -n. + + Returns `None` if the IP is not found. + + We do this because we aren't necessarily the only DHCP server on the + network, so we can't check our own leases file and be guaranteed to find an + IP that matches. + + :param ip: The ip address, e.g. '192.168.1.1'. + """ + # Normalise ip. IPv6 has a wealth of alternate notations, so we can't + # just look for the string; we have to parse. + ip = IPAddress(ip) + # Use "C" locale; we're parsing output so we don't want any translations. + output = call_and_check(['ip', 'neigh'], env={'LC_ALL': 'C'}) + + for line in sorted(output.splitlines()): + columns = line.split() + if len(columns) < 4: + raise Exception( + "Output line from 'ip neigh' does not look like a neighbour " + "entry: '%s'" % line) + # Normal "ip neigh" output lines look like: + # dev lladdr [router] + # + # Where is an IPv4 or IPv6 address, is a network + # interface name such as eth0, is a MAC address, and status + # can be REACHABLE, STALE, etc. + # + # However sometimes you'll also see lines like: + # dev FAILED + # + # Note the missing lladdr entry. + if IPAddress(columns[0]) == ip and columns[3] == 'lladdr': + # Found matching IP address. Return MAC. + return columns[4] + return None + + +def clean_up_netifaces_address(address, interface): + """Strip extraneous matter from `netifaces` IPv6 address. + + Each IPv6 address we get from `netifaces` has a "zone index": a suffix + consisting of a percent sign and a network interface name, e.g. `eth0` + in GNU/Linux or `0` in Windows. These are normally used to disambiguate + link-local addresses (which have the same network prefix on each link, + but may not actually be connected). 
`IPAddress` doesn't parse that + suffix, so we strip it off. + """ + return address.replace('%' + interface, '') + + +def get_all_addresses_for_interface(interface): + """Yield all IPv4 and IPv6 addresses for an interface as `IPAddress`es. + + IPv4 addresses will be yielded first, followed by v6 addresses. + + :param interface: The name of the interface whose addresses we + should retrieve. + """ + addresses = netifaces.ifaddresses(interface) + if netifaces.AF_INET in addresses: + for inet_address in addresses[netifaces.AF_INET]: + if "addr" in inet_address: + yield inet_address["addr"] + if netifaces.AF_INET6 in addresses: + for inet6_address in addresses[netifaces.AF_INET6]: + if "addr" in inet6_address: + # There's a bug in netifaces which results in the + # interface name being appended to the IPv6 address. + # Goodness knows why. Anyway, we deal with that + # here. + yield clean_up_netifaces_address( + inet6_address["addr"], interface) + + +def get_all_interface_addresses(): + """For each network interface, yield its addresses.""" + for interface in netifaces.interfaces(): + for address in get_all_addresses_for_interface(interface): + yield address + + +def resolve_hostname(hostname, ip_version=4): + """Wrapper around `getaddrinfo`: return addresses for `hostname`. + + :param hostname: Host name (or IP address). + :param ip_version: Look for addresses of this IP version only: 4 for IPv4, + or 6 for IPv6. + :return: A set of `IPAddress`. Empty if `hostname` does not resolve for + the requested IP version. + """ + addr_families = { + 4: AF_INET, + 6: AF_INET6, + } + assert ip_version in addr_families + # Arbitrary non-privileged port, on which we can call getaddrinfo. + port = 33360 + try: + address_info = getaddrinfo(hostname, port, addr_families[ip_version]) + except gaierror as e: + if e.errno in (EAI_NONAME, EAI_NODATA): + # Name does not resolve. 
+ address_info = [] + else: + raise + + # The contents of sockaddr differ for IPv6 and IPv4, but the + # first element is always the address, and that's all we care + # about. + return { + IPAddress(sockaddr[0]) + for family, socktype, proto, canonname, sockaddr in address_info + } + + +def intersect_iprange(network, iprange): + """Return the intersection between two IPNetworks or IPRanges. + + IPSet is notoriously inefficient so we intersect ourselves here. + """ + if network.last >= iprange.first and network.first <= iprange.last: + first = max(network.first, iprange.first) + last = min(network.last, iprange.last) + return IPRange(first, last) + else: + return None + + +def ip_range_within_network(ip_range, network): + """Check that the whole of a given IP range is within a given network.""" + # Make sure that ip_range is an IPRange and not an IPNetwork, + # otherwise this won't work. + if isinstance(ip_range, IPNetwork): + ip_range = IPRange( + IPAddress(network.first), IPAddress(network.last)) + return all([ + intersect_iprange(cidr, network) for cidr in ip_range.cidrs()]) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/script.py maas-1.7.6+bzr3376/src/provisioningserver/utils/script.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/script.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/script.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,147 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Utilities for adding sub-commands to the MAAS management commands.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'ActionScript', + 'AtomicWriteScript', + 'MainScript', + ] + +from argparse import ArgumentParser +from os import fdopen +import signal +from subprocess import CalledProcessError +import sys + +from provisioningserver.utils.fs import atomic_write + + +class ActionScript: + """A command-line script that follows a command+verb pattern.""" + + def __init__(self, description): + super(ActionScript, self).__init__() + # See http://docs.python.org/release/2.7/library/argparse.html. + self.parser = ArgumentParser(description=description) + self.subparsers = self.parser.add_subparsers(title="actions") + + @staticmethod + def setup(): + # Ensure stdout and stderr are line-bufferred. + sys.stdout = fdopen(sys.stdout.fileno(), "ab", 1) + sys.stderr = fdopen(sys.stderr.fileno(), "ab", 1) + # Run the SIGINT handler on SIGTERM; `svc -d` sends SIGTERM. + signal.signal(signal.SIGTERM, signal.default_int_handler) + + def register(self, name, handler, *args, **kwargs): + """Register an action for the given name. + + :param name: The name of the action. + :param handler: An object, a module for example, that has `run` and + `add_arguments` callables. The docstring of the `run` callable is + used as the help text for the newly registered action. + :param args: Additional positional arguments for the subparser_. + :param kwargs: Additional named arguments for the subparser_. + + .. _subparser: + http://docs.python.org/ + release/2.7/library/argparse.html#sub-commands + """ + parser = self.subparsers.add_parser( + name, *args, help=handler.run.__doc__, **kwargs) + parser.set_defaults(handler=handler) + handler.add_arguments(parser) + return parser + + def execute(self, argv=None): + """Execute this action. 
+ + This is intended for in-process invocation of an action, though it may + still raise L{SystemExit}. The L{__call__} method is intended for when + this object is executed as a script proper. + """ + args = self.parser.parse_args(argv) + args.handler.run(args) + + def __call__(self, argv=None): + try: + self.setup() + self.execute(argv) + except CalledProcessError as error: + # Print error.cmd and error.output too? + raise SystemExit(error.returncode) + except KeyboardInterrupt: + raise SystemExit(1) + else: + raise SystemExit(0) + + +class MainScript(ActionScript): + """An `ActionScript` that always accepts a `--config-file` option. + + The `--config-file` option defaults to the value of + `MAAS_PROVISIONING_SETTINGS` in the process's environment, or absent + that, `$MAAS_CONFIG_DIR/pserv.yaml` (normally /etc/maas/pserv.yaml for + packaged installations, or when running from branch, the equivalent + inside that branch). + """ + + def __init__(self, description): + # Avoid circular imports. + from provisioningserver.config import Config + + super(MainScript, self).__init__(description) + self.parser.add_argument( + "-c", "--config-file", metavar="FILENAME", + help="Configuration file to load [%(default)s].", + default=Config.DEFAULT_FILENAME) + + +class AtomicWriteScript: + """Wrap the atomic_write function turning it into an ActionScript. + + To use: + >>> main = MainScript(atomic_write.__doc__) + >>> main.register("myscriptname", AtomicWriteScript) + >>> main() + """ + + @staticmethod + def add_arguments(parser): + """Initialise options for writing files atomically. + + :param parser: An instance of :class:`ArgumentParser`. 
+ """ + parser.add_argument( + "--no-overwrite", action="store_true", required=False, + default=False, help="Don't overwrite file if it exists") + parser.add_argument( + "--filename", action="store", required=True, help=( + "The name of the file in which to store contents of stdin")) + parser.add_argument( + "--mode", action="store", required=False, default=None, help=( + "They permissions to set on the file. If not set " + "will be r/w only to owner")) + + @staticmethod + def run(args): + """Take content from stdin and write it atomically to a file.""" + content = sys.stdin.read() + if args.mode is not None: + mode = int(args.mode, 8) + else: + mode = 0600 + atomic_write( + content, args.filename, overwrite=not args.no_overwrite, + mode=mode) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/shell.py maas-1.7.6+bzr3376/src/provisioningserver/utils/shell.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/shell.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/shell.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,316 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Utilities for executing external commands.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [ + 'call_and_check', + 'ExternalProcessError', + 'pipefork', + 'PipeForkError', + ] + +from contextlib import contextmanager +import cPickle +import os +from pipes import quote +import signal +from string import printable +from subprocess import ( + CalledProcessError, + PIPE, + Popen, + ) +from sys import ( + stderr, + stdout, + ) +from tempfile import TemporaryFile + +from twisted.python.failure import Failure + +# A mapping of signal numbers to names. It is strange that this isn't in the +# standard library (but I did check). 
+signal_names = { + value: name for name, value in vars(signal).viewitems() + if name.startswith('SIG') and '_' not in name +} + +# A table suitable for use with str.translate() to replace each +# non-printable and non-ASCII character in a byte string with a question +# mark, mimicking the "replace" strategy when encoding and decoding. +non_printable_replace_table = b"".join( + chr(i) if chr(i) in printable else b"?" + for i in xrange(0xff + 0x01)) + + +class ExternalProcessError(CalledProcessError): + """Raised when there's a problem calling an external command. + + Unlike `CalledProcessError`: + + - `__str__()` returns a string containing the output of the failed + external process, if available. All non-printable and non-ASCII + characters are filtered out, replaced by question marks. + + - `__unicode__()` is defined, and tries to return something + analagous to `__str__()` but keeping in valid unicode characters + from the error message. + + """ + + @classmethod + def upgrade(cls, error): + """Upgrade the given error to an instance of this class. + + If `error` is an instance of :py:class:`CalledProcessError`, this will + change its class, in-place, to :py:class:`ExternalProcessError`. + + There are two ways we could have done this: + + 1. Change the class of `error` in-place. + + 2. Capture ``exc_info``, create a new exception based on `error`, then + re-raise with the 3-argument version of ``raise``. + + #1 seems a lot simpler so that's what this method does. The caller + needs then only use a naked ``raise`` to get the utility of this class + without losing the traceback. 
+ """ + if type(error) is CalledProcessError: + error.__class__ = cls + + @staticmethod + def _to_unicode(string): + if isinstance(string, bytes): + return string.decode("ascii", "replace") + else: + return unicode(string) + + @staticmethod + def _to_ascii(string, table=non_printable_replace_table): + if isinstance(string, unicode): + return string.encode("ascii", "replace") + else: + return bytes(string).translate(table) + + def __unicode__(self): + cmd = u" ".join(quote(self._to_unicode(part)) for part in self.cmd) + output = self._to_unicode(self.output) + return u"Command `%s` returned non-zero exit status %d:\n%s" % ( + cmd, self.returncode, output) + + def __str__(self): + cmd = b" ".join(quote(self._to_ascii(part)) for part in self.cmd) + output = self._to_ascii(self.output) + return b"Command `%s` returned non-zero exit status %d:\n%s" % ( + cmd, self.returncode, output) + + @property + def output_as_ascii(self): + """The command's output as printable ASCII. + + Non-printable and non-ASCII characters are filtered out. + """ + return self._to_ascii(self.output) + + @property + def output_as_unicode(self): + """The command's output as Unicode text. + + Invalid Unicode characters are filtered out. + """ + return self._to_unicode(self.output) + + +def call_and_check(command, *args, **kwargs): + """Execute a command, similar to `subprocess.check_call()`. + + :param command: Command line, as a list of strings. + :return: The command's output from standard output. + :raise ExternalProcessError: If the command returns nonzero. + """ + process = Popen(command, *args, stdout=PIPE, stderr=PIPE, **kwargs) + stdout, stderr = process.communicate() + stderr = stderr.strip() + if process.returncode != 0: + raise ExternalProcessError(process.returncode, command, output=stderr) + return stdout + + +class PipeForkError(Exception): + """An error occurred in `pipefork`.""" + + +@contextmanager +def pipefork(): + """Context manager that forks with pipes between parent and child. 
+ + Use like so:: + + with pipefork() as (pid, fin, fout): + if pid == 0: + # This is the child. + ... + else: + # This is the parent. + ... + + Pipes are set up so that the parent can write to the child, and + vice-versa. + + In the child, ``fin`` is a file that reads from the parent, and ``fout`` + is a file that writes to the parent. + + In the parent, ``fin`` is a file that reads from the child, and ``fout`` + is a file that writes to the child. + + Be careful to think about closing these file objects to avoid deadlocks. + For example, the following will deadlock: + + with pipefork() as (pid, fin, fout): + if pid == 0: + fin.read() # Read from the parent. + fout.write(b'Moien') # Greet the parent. + else: + fout.write(b'Hello') # Greet the child. + fin.read() # Read from the child *BLOCKS FOREVER* + + The reason is that the read in the child never returns because the pipe is + never closed. Closing ``fout`` in the parent resolves the problem:: + + with pipefork() as (pid, fin, fout): + if pid == 0: + fin.read() # Read from the parent. + fout.write(b'Moien') # Greet the parent. + else: + fout.write(b'Hello') # Greet the child. + fout.close() # Close the write pipe to the child. + fin.read() # Read from the child. + + Exceptions raised in the child are magically re-raised in the parent. When + the child has died for another reason, a signal perhaps, a `PipeForkError` + is raised with an explanatory message. + + :raises: `PipeForkError` when the child process dies a somewhat unnatural + death, e.g. by a signal or when writing a crash-dump fails. + """ + crashfile = TemporaryFile() + + c2pread, c2pwrite = os.pipe() + p2cread, p2cwrite = os.pipe() + + pid = os.fork() + + if pid == 0: + # Child: this conditional branch runs in the child process. 
+ try: + os.close(c2pread) + os.close(p2cwrite) + + with os.fdopen(p2cread, 'rb') as fin: + with os.fdopen(c2pwrite, 'wb') as fout: + yield pid, fin, fout + + stdout.flush() + stderr.flush() + except SystemExit as se: + # Exit hard, not soft. + os._exit(se.code) + except: + try: + # Pickle error to crash file. + cPickle.dump(Failure(), crashfile, cPickle.HIGHEST_PROTOCOL) + crashfile.flush() + finally: + # Exit hard. + os._exit(2) + finally: + # Exit hard. + os._exit(0) + else: + # Parent: this conditional branch runs in the parent process. + os.close(c2pwrite) + os.close(p2cread) + + with os.fdopen(c2pread, 'rb') as fin: + with os.fdopen(p2cwrite, 'wb') as fout: + yield pid, fin, fout + + # Wait for the child to finish. + _, status = os.waitpid(pid, 0) + signal = (status & 0xff) + code = (status >> 8) & 0xff + + # Check for a saved crash. + crashfile.seek(0) + try: + error = cPickle.load(crashfile) + except EOFError: + # No crash was recorded. + error = None + else: + # Raise exception from child. + error.raiseException() + finally: + crashfile.close() + + if os.WIFSIGNALED(status): + # The child was killed by a signal. + raise PipeForkError( + "Child killed by signal %d (%s)" % ( + signal, signal_names.get(signal, "?"))) + elif code != 0: + # The child exited with a non-zero code. + raise PipeForkError( + "Child exited with code %d" % code) + else: + # All okay. + pass + + +@contextmanager +def objectfork(): + """Like `pipefork`, but objects can be passed between parent and child. + + Usage:: + + with objectfork() as (pid, recv, send): + if pid == 0: + # Child. + for foo in bar(): + send(foo) + send(None) # Done. + else: + for data in iter(recv, None): + ... # Process data. + + In the child, ``recv`` receives objects sent -- via `send` -- from + the parent. + + In the parent, ``recv`` receives objects sent -- via `send` -- from + the child. + + All objects must be picklable. + + See `pipefork` for more details. 
+ """ + with pipefork() as (pid, fin, fout): + + def recv(): + return cPickle.load(fin) + + def send(obj): + cPickle.dump(obj, fout, cPickle.HIGHEST_PROTOCOL) + fout.flush() # cPickle.dump() does not flush. + + yield pid, recv, send diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_curtin.py maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_curtin.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_curtin.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_curtin.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,120 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for Curtin-related utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from provisioningserver.utils.curtin import ( + compose_mv_command, + compose_recursive_copy, + compose_write_text_file, + ) +from testtools.matchers import ( + AllMatch, + ContainsAll, + IsInstance, + ) + + +class TestComposeMvCommand(MAASTestCase): + + def test__returns_command_list(self): + command = compose_mv_command( + factory.make_name('source'), factory.make_name('dest')) + self.expectThat(command, IsInstance(list)) + self.expectThat(command, AllMatch(IsInstance(unicode))) + + def test__runs_command_in_target(self): + command = compose_mv_command( + factory.make_name('source'), factory.make_name('dest')) + curtin_prefix = ['curtin', 'in-target', '--'] + self.assertEqual(curtin_prefix, command[:len(curtin_prefix)]) + + def test__moves_file(self): + source = factory.make_name('source') + dest = factory.make_name('dest') + command = compose_mv_command(source, dest) + mv_suffix = ['mv', '--', source, dest] + self.assertEqual(mv_suffix, 
command[-len(mv_suffix):]) + + +class TestComposeRecursiveCopy(MAASTestCase): + + def test__returns_command_list(self): + command = compose_recursive_copy( + factory.make_name('source'), factory.make_name('dest')) + self.expectThat(command, IsInstance(list)) + self.expectThat(command, AllMatch(IsInstance(unicode))) + + def test__runs_command_in_target(self): + command = compose_recursive_copy( + factory.make_name('source'), factory.make_name('dest')) + curtin_prefix = ['curtin', 'in-target', '--'] + self.assertEqual(curtin_prefix, command[:len(curtin_prefix)]) + + def test__copies(self): + source = factory.make_name('source') + dest = factory.make_name('dest') + command = compose_recursive_copy(source, dest) + cp_suffix = ['cp', '-r', '-p', '--', source, dest] + self.assertEqual(cp_suffix, command[-len(cp_suffix):]) + + +class TestComposeWriteTextFile(MAASTestCase): + + def test__returns_complete_write_file_dict(self): + preseed = compose_write_text_file( + factory.make_name('file'), factory.make_name('content')) + self.expectThat(preseed, IsInstance(dict)) + self.expectThat( + preseed.keys(), + ContainsAll(['path', 'content', 'owner', 'permissions'])) + + def test__obeys_path_param(self): + path = factory.make_name('path') + preseed = compose_write_text_file(path, factory.make_name('content')) + self.assertEqual(path, preseed['path']) + + def test__obeys_content_param(self): + content = factory.make_name('content') + preseed = compose_write_text_file(factory.make_name('path'), content) + self.assertEqual(content, preseed['content']) + + def test__defaults_owner_to_root(self): + preseed = compose_write_text_file( + factory.make_name('file'), factory.make_name('content')) + self.assertEqual('root:root', preseed['owner']) + + def test__obeys_owner_param(self): + owner = '%s:%s' % ( + factory.make_name('user'), + factory.make_name('group'), + ) + preseed = compose_write_text_file( + factory.make_name('file'), factory.make_name('content'), + owner=owner) + 
self.assertEqual(owner, preseed['owner']) + + def test__defaults_permissions_to_0600(self): + preseed = compose_write_text_file( + factory.make_name('file'), factory.make_name('content')) + self.assertEqual('0600', preseed['permissions']) + + def test__obeys_permissions_param(self): + permissions = 0123 + preseed = compose_write_text_file( + factory.make_name('file'), factory.make_name('content'), + permissions=permissions) + self.assertEqual('0123', preseed['permissions']) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_enum.py maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_enum.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_enum.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_enum.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,104 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for enum-related utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from maastesting.testcase import MAASTestCase +from provisioningserver.utils.enum import ( + map_enum, + map_enum_reverse, + ) + + +class TestEnum(MAASTestCase): + + def test_map_enum_includes_all_enum_values(self): + + class Enum: + ONE = 1 + TWO = 2 + + self.assertItemsEqual(['ONE', 'TWO'], map_enum(Enum).keys()) + + def test_map_enum_omits_private_or_special_methods(self): + + class Enum: + def __init__(self): + pass + + def __repr__(self): + return "Enum" + + def _save(self): + pass + + VALUE = 9 + + self.assertItemsEqual(['VALUE'], map_enum(Enum).keys()) + + def test_map_enum_maps_values(self): + + class Enum: + ONE = 1 + THREE = 3 + + self.assertEqual({'ONE': 1, 'THREE': 3}, map_enum(Enum)) + + def test_map_enum_reverse_maps_values(self): + + class Enum: + ONE = 1 + NINE = 9 + + self.assertEqual( + 
{1: 'ONE', 9: 'NINE'}, + map_enum_reverse(Enum)) + + def test_map_enum_reverse_ignores_unwanted_keys(self): + + class Enum: + ZERO = 0 + ONE = 1 + + self.assertEqual({0: 'ZERO'}, map_enum_reverse(Enum, ignore=['ONE'])) + + def test_map_enum_reverse_ignores_keys_for_clashing_values(self): + # This enum has two keys for each of its values. We'll make the + # mapping ignore the duplicates. The values are still mapped, but + # only to the non-ignored keys. + # We jumble up the ordering a bit to try and trip up any bugs. The + # nondeterministic traversal order of a dict may accidentally hide + # bugs if the order is too predictable. + class Enum: + ONE = 1 + FIVE = 5 + ONE_2 = 1 + TWO = 2 + THREE_2 = 3 + THREE = 3 + FOUR_2 = 4 + TWO_2 = 2 + FOUR = 4 + FIVE_2 = 5 + + self.assertEqual( + {1: 'ONE', 2: 'TWO', 3: 'THREE', 4: 'FOUR', 5: 'FIVE'}, + map_enum_reverse( + Enum, ignore=[ + 'ONE_2', + 'TWO_2', + 'THREE_2', + 'FOUR_2', + 'FIVE_2', + ])) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_env.py maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_env.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_env.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_env.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,80 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for environment-related helpers.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import os + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from provisioningserver.utils.env import environment_variables +from testtools.testcase import ExpectedException + + +class TestEnvironmentVariables(MAASTestCase): + """Tests for `environment_variables`.""" + + def make_variable(self): + return factory.make_name('testvar'), factory.make_name('value') + + def test__sets_variables(self): + var, value = self.make_variable() + with environment_variables({var: value}): + env = os.environ.copy() + self.assertEqual(value, env[var]) + + def test__overrides_prior_values(self): + var, prior_value = self.make_variable() + temp_value = factory.make_name('temp-value') + with environment_variables({var: prior_value}): + with environment_variables({var: temp_value}): + env = os.environ.copy() + self.assertEqual(temp_value, env[var]) + + def test__leaves_other_variables_intact(self): + untouched_var, untouched_value = self.make_variable() + var, value = self.make_variable() + with environment_variables({untouched_var: untouched_value}): + with environment_variables({var: value}): + env = os.environ.copy() + self.assertEqual(untouched_value, env[untouched_var]) + + def test__restores_variables_to_previous_values(self): + var, prior_value = self.make_variable() + temp_value = factory.make_name('temp-value') + with environment_variables({var: prior_value}): + with environment_variables({var: temp_value}): + pass + env = os.environ.copy() + self.assertEqual(prior_value, env[var]) + + def test__restores_previously_unset_variables_to_being_unset(self): + var, value = self.make_variable() + self.assertNotIn(var, os.environ) + with environment_variables({var: value}): + pass + self.assertNotIn(var, os.environ) + + def 
test__restores_even_after_exception(self): + var, value = self.make_variable() + self.assertNotIn(var, os.environ) + + class DeliberateException(Exception): + pass + + with ExpectedException(DeliberateException): + with environment_variables({var: value}): + raise DeliberateException() + + self.assertNotIn(var, os.environ) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_fs.py maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_fs.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_fs.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_fs.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,486 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for filesystem-related utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import os.path +from random import randint +from shutil import rmtree +import stat +from subprocess import ( + CalledProcessError, + PIPE, + ) +import sys +import tempfile +import time + +from maastesting.factory import factory +from maastesting.fakemethod import FakeMethod +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import MAASTestCase +from mock import Mock +from provisioningserver.utils.fs import ( + atomic_symlink, + atomic_write, + ensure_dir, + get_mtime, + incremental_write, + pick_new_mtime, + read_text_file, + sudo_write_file, + tempdir, + write_text_file, + ) +import provisioningserver.utils.fs as fs_module +from testtools.matchers import ( + DirExists, + EndsWith, + FileContains, + FileExists, + Not, + SamePath, + StartsWith, + ) +from testtools.testcase import ExpectedException + + +class TestAtomicWrite(MAASTestCase): + """Test `atomic_write`.""" + + def test_atomic_write_overwrites_dest_file(self): + content = 
factory.make_string() + filename = self.make_file(contents=factory.make_string()) + atomic_write(content, filename) + self.assertThat(filename, FileContains(content)) + + def test_atomic_write_does_not_overwrite_file_if_overwrite_false(self): + content = factory.make_string() + random_content = factory.make_string() + filename = self.make_file(contents=random_content) + atomic_write(content, filename, overwrite=False) + self.assertThat(filename, FileContains(random_content)) + + def test_atomic_write_writes_file_if_no_file_present(self): + filename = os.path.join(self.make_dir(), factory.make_string()) + content = factory.make_string() + atomic_write(content, filename, overwrite=False) + self.assertThat(filename, FileContains(content)) + + def test_atomic_write_does_not_leak_temp_file_when_not_overwriting(self): + # If the file is not written because it already exists and + # overwriting was disabled, atomic_write does not leak its + # temporary file. + filename = self.make_file() + atomic_write(factory.make_string(), filename, overwrite=False) + self.assertEqual( + [os.path.basename(filename)], + os.listdir(os.path.dirname(filename))) + + def test_atomic_write_does_not_leak_temp_file_on_failure(self): + # If the overwrite fails, atomic_write does not leak its + # temporary file. + self.patch(os, 'rename', Mock(side_effect=OSError())) + filename = self.make_file() + with ExpectedException(OSError): + atomic_write(factory.make_string(), filename) + self.assertEqual( + [os.path.basename(filename)], + os.listdir(os.path.dirname(filename))) + + def test_atomic_write_sets_permissions(self): + atomic_file = self.make_file() + # Pick an unusual mode that is also likely to fall outside our + # umask. We want this mode set, not treated as advice that may + # be tightened up by umask later. 
+ mode = 0323 + atomic_write(factory.make_string(), atomic_file, mode=mode) + self.assertEqual(mode, stat.S_IMODE(os.stat(atomic_file).st_mode)) + + def test_atomic_write_sets_permissions_before_moving_into_place(self): + + recorded_modes = [] + + def record_mode(source, dest): + """Stub for os.rename: get source file's access mode.""" + recorded_modes.append(os.stat(source).st_mode) + + self.patch(os, 'rename', Mock(side_effect=record_mode)) + playground = self.make_dir() + atomic_file = os.path.join(playground, factory.make_name('atomic')) + mode = 0323 + atomic_write(factory.make_string(), atomic_file, mode=mode) + [recorded_mode] = recorded_modes + self.assertEqual(mode, stat.S_IMODE(recorded_mode)) + + def test_atomic_write_sets_OSError_filename_if_undefined(self): + # When the filename attribute of an OSError is undefined when + # attempting to create a temporary file, atomic_write fills it in with + # a representative filename, similar to the specification required by + # mktemp(1). + mock_mkstemp = self.patch(tempfile, "mkstemp") + mock_mkstemp.side_effect = OSError() + filename = os.path.join("directory", "basename") + error = self.assertRaises(OSError, atomic_write, "content", filename) + self.assertEqual( + os.path.join("directory", ".basename.XXXXXX.tmp"), + error.filename) + + def test_atomic_write_does_not_set_OSError_filename_if_defined(self): + # When the filename attribute of an OSError is defined when attempting + # to create a temporary file, atomic_write leaves it alone. 
+ mock_mkstemp = self.patch(tempfile, "mkstemp") + mock_mkstemp.side_effect = OSError() + mock_mkstemp.side_effect.filename = factory.make_name("filename") + filename = os.path.join("directory", "basename") + error = self.assertRaises(OSError, atomic_write, "content", filename) + self.assertEqual( + mock_mkstemp.side_effect.filename, + error.filename) + + +class TestAtomicSymlink(MAASTestCase): + """Test `atomic_symlink`.""" + + def test_atomic_symlink_creates_symlink(self): + filename = self.make_file(contents=factory.make_string()) + target_dir = self.make_dir() + link_name = factory.make_name('link') + target = os.path.join(target_dir, link_name) + atomic_symlink(filename, target) + self.assertTrue( + os.path.islink(target), "atomic_symlink didn't create a symlink") + self.assertThat(target, SamePath(filename)) + + def test_atomic_symlink_overwrites_dest_file(self): + filename = self.make_file(contents=factory.make_string()) + target_dir = self.make_dir() + link_name = factory.make_name('link') + # Create a file that will be overwritten. + factory.make_file(location=target_dir, name=link_name) + target = os.path.join(target_dir, link_name) + atomic_symlink(filename, target) + self.assertTrue( + os.path.islink(target), "atomic_symlink didn't create a symlink") + self.assertThat(target, SamePath(filename)) + + def test_atomic_symlink_does_not_leak_temp_file_if_failure(self): + # In the face of failure, no temp file is leaked. 
+ self.patch(os, 'rename', Mock(side_effect=OSError())) + filename = self.make_file() + target_dir = self.make_dir() + link_name = factory.make_name('link') + target = os.path.join(target_dir, link_name) + with ExpectedException(OSError): + atomic_symlink(filename, target) + self.assertEqual( + [], + os.listdir(target_dir)) + + +class TestIncrementalWrite(MAASTestCase): + """Test `incremental_write`.""" + + def test_incremental_write_increments_modification_time(self): + content = factory.make_string() + filename = self.make_file(contents=factory.make_string()) + # Pretend that this file is older than it is. So that + # incrementing its mtime won't put it in the future. + old_mtime = os.stat(filename).st_mtime - 10 + os.utime(filename, (old_mtime, old_mtime)) + incremental_write(content, filename) + self.assertAlmostEqual( + os.stat(filename).st_mtime, old_mtime + 1, delta=0.01) + + def test_incremental_write_sets_permissions(self): + atomic_file = self.make_file() + mode = 0323 + incremental_write(factory.make_string(), atomic_file, mode=mode) + self.assertEqual(mode, stat.S_IMODE(os.stat(atomic_file).st_mode)) + + +class TestGetMTime(MAASTestCase): + """Test `get_mtime`.""" + + def test_get_mtime_returns_None_for_nonexistent_file(self): + nonexistent_file = os.path.join( + self.make_dir(), factory.make_name('nonexistent-file')) + self.assertIsNone(get_mtime(nonexistent_file)) + + def test_get_mtime_returns_mtime(self): + existing_file = self.make_file() + mtime = os.stat(existing_file).st_mtime - randint(0, 100) + os.utime(existing_file, (mtime, mtime)) + # Some small rounding/representation errors can happen here. + # That's just the way of floating-point numbers. According to + # Gavin there's a conversion to fixed-point along the way, which + # would raise representability issues. 
+ self.assertAlmostEqual(mtime, get_mtime(existing_file), delta=0.00001) + + def test_get_mtime_passes_on_other_error(self): + forbidden_file = self.make_file() + self.patch(os, 'stat', FakeMethod(failure=OSError("Forbidden file"))) + self.assertRaises(OSError, get_mtime, forbidden_file) + + +class TestPickNewMTime(MAASTestCase): + """Test `pick_new_mtime`.""" + + def test_pick_new_mtime_applies_starting_age_to_new_file(self): + before = time.time() + starting_age = randint(0, 5) + recommended_age = pick_new_mtime(None, starting_age=starting_age) + now = time.time() + self.assertAlmostEqual( + now - starting_age, + recommended_age, + delta=(now - before)) + + def test_pick_new_mtime_increments_mtime_if_possible(self): + past = time.time() - 2 + self.assertEqual(past + 1, pick_new_mtime(past)) + + def test_pick_new_mtime_refuses_to_move_mtime_into_the_future(self): + # Race condition: this will fail if the test gets held up for + # a second between readings of the clock. + now = time.time() + self.assertEqual(now, pick_new_mtime(now)) + + +class TestSudoWriteFile(MAASTestCase): + """Testing for `sudo_write_file`.""" + + def patch_popen(self, return_value=0): + process = Mock() + process.returncode = return_value + process.communicate = Mock(return_value=('output', 'error output')) + self.patch(fs_module, 'Popen', Mock(return_value=process)) + return process + + def test_calls_atomic_write(self): + self.patch_popen() + path = os.path.join(self.make_dir(), factory.make_name('file')) + contents = factory.make_string() + + sudo_write_file(path, contents) + + self.assertThat(fs_module.Popen, MockCalledOnceWith([ + 'sudo', '-n', 'maas-provision', 'atomic-write', + '--filename', path, '--mode', '0644', + ], + stdin=PIPE)) + + def test_encodes_contents(self): + process = self.patch_popen() + contents = factory.make_string() + encoding = 'utf-16' + sudo_write_file(self.make_file(), contents, encoding=encoding) + self.assertThat( + process.communicate, + 
MockCalledOnceWith(contents.encode(encoding))) + + def test_catches_failures(self): + self.patch_popen(1) + self.assertRaises( + CalledProcessError, + sudo_write_file, self.make_file(), factory.make_string()) + + +class TestEnsureDir(MAASTestCase): + def test_succeeds_if_directory_already_existed(self): + path = self.make_dir() + ensure_dir(path) + self.assertThat(path, DirExists()) + + def test_fails_if_path_is_already_a_file(self): + path = self.make_file() + self.assertRaises(OSError, ensure_dir, path) + self.assertThat(path, FileExists()) + + def test_creates_dir_if_not_present(self): + path = os.path.join(self.make_dir(), factory.make_name()) + ensure_dir(path) + self.assertThat(path, DirExists()) + + def test_passes_on_other_errors(self): + not_a_dir = self.make_file() + self.assertRaises( + OSError, + ensure_dir, + os.path.join(not_a_dir, factory.make_name('impossible'))) + + def test_creates_multiple_layers_of_directories_if_needed(self): + path = os.path.join( + self.make_dir(), factory.make_name('subdir'), + factory.make_name('sbusubdir')) + ensure_dir(path) + self.assertThat(path, DirExists()) + + +class TestTempDir(MAASTestCase): + def test_creates_real_fresh_directory(self): + stored_text = factory.make_string() + filename = factory.make_name('test-file') + with tempdir() as directory: + self.assertThat(directory, DirExists()) + write_text_file(os.path.join(directory, filename), stored_text) + retrieved_text = read_text_file(os.path.join(directory, filename)) + files = os.listdir(directory) + + self.assertEqual(stored_text, retrieved_text) + self.assertEqual([filename], files) + + def test_creates_unique_directory(self): + with tempdir() as dir1, tempdir() as dir2: + pass + self.assertNotEqual(dir1, dir2) + + def test_cleans_up_on_successful_exit(self): + with tempdir() as directory: + file_path = factory.make_file(directory) + + self.assertThat(directory, Not(DirExists())) + self.assertThat(file_path, Not(FileExists())) + + def 
test_cleans_up_on_exception_exit(self): + class DeliberateFailure(Exception): + pass + + with ExpectedException(DeliberateFailure): + with tempdir() as directory: + file_path = factory.make_file(directory) + raise DeliberateFailure("Exiting context by exception") + + self.assertThat(directory, Not(DirExists())) + self.assertThat(file_path, Not(FileExists())) + + def test_tolerates_disappearing_dir(self): + with tempdir() as directory: + rmtree(directory) + + self.assertThat(directory, Not(DirExists())) + + def test_uses_location(self): + temp_location = self.make_dir() + with tempdir(location=temp_location) as directory: + self.assertThat(directory, DirExists()) + location_listing = os.listdir(temp_location) + + self.assertNotEqual(temp_location, directory) + self.assertThat(directory, StartsWith(temp_location + os.path.sep)) + self.assertIn(os.path.basename(directory), location_listing) + self.assertThat(temp_location, DirExists()) + self.assertThat(directory, Not(DirExists())) + + def test_yields_unicode(self): + with tempdir() as directory: + pass + + self.assertIsInstance(directory, unicode) + + def test_accepts_unicode_from_mkdtemp(self): + fake_dir = os.path.join(self.make_dir(), factory.make_name('tempdir')) + self.assertIsInstance(fake_dir, unicode) + self.patch(tempfile, 'mkdtemp').return_value = fake_dir + + with tempdir() as directory: + pass + + self.assertEqual(fake_dir, directory) + self.assertIsInstance(directory, unicode) + + def test_decodes_bytes_from_mkdtemp(self): + encoding = 'utf-16' + self.patch(sys, 'getfilesystemencoding').return_value = encoding + fake_dir = os.path.join(self.make_dir(), factory.make_name('tempdir')) + self.patch(tempfile, 'mkdtemp').return_value = fake_dir.encode( + encoding) + self.patch(fs_module, 'rmtree') + + with tempdir() as directory: + pass + + self.assertEqual(fake_dir, directory) + self.assertIsInstance(directory, unicode) + + def test_uses_prefix(self): + prefix = factory.make_string(3) + with 
tempdir(prefix=prefix) as directory: + pass + + self.assertThat(os.path.basename(directory), StartsWith(prefix)) + + def test_uses_suffix(self): + suffix = factory.make_string(3) + with tempdir(suffix=suffix) as directory: + pass + + self.assertThat(os.path.basename(directory), EndsWith(suffix)) + + def test_restricts_access(self): + with tempdir() as directory: + mode = os.stat(directory).st_mode + self.assertEqual( + stat.S_IMODE(mode), + stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) + + +class TestReadTextFile(MAASTestCase): + def test_reads_file(self): + text = factory.make_string() + self.assertEqual(text, read_text_file(self.make_file(contents=text))) + + def test_defaults_to_utf8(self): + # Test input: "registered trademark" (ringed R) symbol. + text = '\xae' + self.assertEqual( + text, + read_text_file(self.make_file(contents=text.encode('utf-8')))) + + def test_uses_given_encoding(self): + # Test input: "registered trademark" (ringed R) symbol. + text = '\xae' + self.assertEqual( + text, + read_text_file( + self.make_file(contents=text.encode('utf-16')), + encoding='utf-16')) + + +class TestWriteTextFile(MAASTestCase): + def test_creates_file(self): + path = os.path.join(self.make_dir(), factory.make_name('text')) + text = factory.make_string() + write_text_file(path, text) + self.assertThat(path, FileContains(text)) + + def test_overwrites_file(self): + path = self.make_file(contents="original text") + text = factory.make_string() + write_text_file(path, text) + self.assertThat(path, FileContains(text)) + + def test_defaults_to_utf8(self): + path = self.make_file() + # Test input: "registered trademark" (ringed R) symbol. + text = '\xae' + write_text_file(path, text) + self.assertThat(path, FileContains(text.encode('utf-8'))) + + def test_uses_given_encoding(self): + path = self.make_file() + # Test input: "registered trademark" (ringed R) symbol. 
+ text = '\xae' + write_text_file(path, text, encoding='utf-16') + self.assertThat(path, FileContains(text.encode('utf-16'))) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_isc.py maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_isc.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_isc.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_isc.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,276 @@ +# Copyright 2015 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Test ISC configuration file parser/generator.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, +) + + +str = None + +__metaclass__ = type +__all__ = [] + +from collections import OrderedDict +from textwrap import dedent + +from maastesting.testcase import MAASTestCase +from provisioningserver.utils.isc import ( + ISCParseException, + make_isc_string, + parse_isc_string, + read_isc_file, + ) +from testtools import ExpectedException + + +class TestParseISCString(MAASTestCase): + + def test_parses_simple_bind_options(self): + testdata = dedent("""\ + options { + directory "/var/cache/bind"; + + dnssec-validation auto; + + auth-nxdomain no; # conform to RFC1035 + listen-on-v6 { any; }; + }; + """) + options = parse_isc_string(testdata) + self.assertEqual( + OrderedDict({u'options': OrderedDict({u'auth-nxdomain': u'no', + u'directory': u'"/var/cache/bind"', + u'dnssec-validation': u'auto', + u'listen-on-v6': OrderedDict({u'any': True})})}), + options) + + def test_parses_bind_acl(self): + testdata = dedent("""\ + acl goodclients { + 192.0.2.0/24; + localhost; + localnets; + }; + """) + acl = parse_isc_string(testdata) + self.assertEqual( + {u'acl goodclients': {u'192.0.2.0/24': True, + u'localhost': True, + u'localnets': True}}, acl) + + def test_parses_multiple_forwarders(self): + testdata = 
dedent("""\ + forwarders { + 91.189.94.2; + 91.189.94.3; + 91.189.94.4; + 91.189.94.5; + 91.189.94.6; + }; + """) + forwarders = parse_isc_string(testdata) + self.assertEqual( + {u'forwarders': {u'91.189.94.2': True, + u'91.189.94.3': True, + u'91.189.94.4': True, + u'91.189.94.5': True, + u'91.189.94.6': True}}, forwarders) + + def test_parses_bug_1413388_config(self): + testdata = dedent("""\ + acl canonical-int-ns { 91.189.90.151; 91.189.89.192; }; + + options { + directory "/var/cache/bind"; + + forwarders { + 91.189.94.2; + 91.189.94.2; + }; + + dnssec-validation auto; + + auth-nxdomain no; # conform to RFC1035 + listen-on-v6 { any; }; + + allow-query { any; }; + allow-transfer { 10.222.64.1; canonical-int-ns; }; + + notify explicit; + also-notify { 91.189.90.151; 91.189.89.192; }; + + allow-query-cache { 10.222.64.0/18; }; + recursion yes; + }; + + zone "." { type master; file "/etc/bind/db.special"; }; + """) + config = parse_isc_string(testdata) + self.assertEqual( + {u'acl canonical-int-ns': + {u'91.189.89.192': True, u'91.189.90.151': True}, + u'options': {u'allow-query': {u'any': True}, + u'allow-query-cache': {u'10.222.64.0/18': True}, + u'allow-transfer': {u'10.222.64.1': True, + u'canonical-int-ns': True}, + u'also-notify': {u'91.189.89.192': True, + u'91.189.90.151': True}, + u'auth-nxdomain': u'no', + u'directory': u'"/var/cache/bind"', + u'dnssec-validation': u'auto', + u'forwarders': {u'91.189.94.2': True}, + u'listen-on-v6': {u'any': True}, + u'notify': u'explicit', + u'recursion': u'yes'}, + u'zone "."': + {u'file': u'"/etc/bind/db.special"', u'type': u'master'}}, + config) + + def test_parse_then_make_then_parse_generates_identical_config(self): + testdata = dedent("""\ + acl canonical-int-ns { 91.189.90.151; 91.189.89.192; }; + + options { + directory "/var/cache/bind"; + + forwarders { + 91.189.94.2; + 91.189.94.2; + }; + + dnssec-validation auto; + + auth-nxdomain no; # conform to RFC1035 + listen-on-v6 { any; }; + + allow-query { any; }; + 
allow-transfer { 10.222.64.1; canonical-int-ns; }; + + notify explicit; + also-notify { 91.189.90.151; 91.189.89.192; }; + + allow-query-cache { 10.222.64.0/18; }; + recursion yes; + }; + + zone "." { type master; file "/etc/bind/db.special"; }; + """) + config = parse_isc_string(testdata) + config_string = make_isc_string(config) + config = parse_isc_string(config_string) + self.assertEqual( + OrderedDict( + [(u'acl canonical-int-ns', + OrderedDict( + [(u'91.189.90.151', True), (u'91.189.89.192', True)])), + (u'options', OrderedDict( + [(u'directory', u'"/var/cache/bind"'), + (u'forwarders', OrderedDict( + [(u'91.189.94.2', True)])), + (u'dnssec-validation', u'auto'), + (u'auth-nxdomain', u'no'), + (u'listen-on-v6', OrderedDict([(u'any', True)])), + (u'allow-query', OrderedDict([(u'any', True)])), + (u'allow-transfer', OrderedDict( + [(u'10.222.64.1', True), + (u'canonical-int-ns', True)])), + (u'notify', u'explicit'), + (u'also-notify', OrderedDict( + [(u'91.189.90.151', True), + (u'91.189.89.192', True)])), + (u'allow-query-cache', OrderedDict( + [(u'10.222.64.0/18', True)])), + (u'recursion', u'yes')])), + (u'zone "."', OrderedDict( + [(u'type', u'master'), + (u'file', u'"/etc/bind/db.special"')]))]), + config) + + def test_parser_preserves_order(self): + testdata = dedent("""\ + forwarders { + 9.9.9.9; + 8.8.8.8; + 7.7.7.7; + 6.6.6.6; + 5.5.5.5; + 4.4.4.4; + 3.3.3.3; + 2.2.2.2; + 1.1.1.1; + }; + """) + forwarders = parse_isc_string(testdata) + self.assertEqual(OrderedDict([(u'forwarders', OrderedDict( + [(u'9.9.9.9', True), (u'8.8.8.8', True), (u'7.7.7.7', True), + (u'6.6.6.6', True), (u'5.5.5.5', True), (u'4.4.4.4', True), + (u'3.3.3.3', True), (u'2.2.2.2', True), (u'1.1.1.1', True)]))]), + forwarders) + + def test_parse_unmatched_brackets_throws_iscparseexception(self): + with ExpectedException(ISCParseException): + parse_isc_string("forwarders {") + + def test_parse_malformed_list_throws_iscparseexception(self): + with ExpectedException(ISCParseException): 
+ parse_isc_string("forwarders {{}a;;b}") + + def test_parse_forgotten_semicolons_throw_iscparseexception(self): + with ExpectedException(ISCParseException): + parse_isc_string("a { b; } { c; } d e;") + + def test_read_isc_file(self): + testdata = dedent("""\ + acl canonical-int-ns { 91.189.90.151; 91.189.89.192; }; + + options { + directory "/var/cache/bind"; + + forwarders { + 91.189.94.2; + 91.189.94.2; + }; + + dnssec-validation auto; + + auth-nxdomain no; # conform to RFC1035 + listen-on-v6 { any; }; + + allow-query { any; }; + allow-transfer { 10.222.64.1; canonical-int-ns; }; + + notify explicit; + also-notify { 91.189.90.151; 91.189.89.192; }; + + allow-query-cache { 10.222.64.0/18; }; + recursion yes; + }; + + zone "." { type master; file "/etc/bind/db.special"; }; + """) + testfile = self.make_file(contents=testdata) + parsed = read_isc_file(testfile) + self.assertEqual( + {u'acl canonical-int-ns': + {u'91.189.89.192': True, u'91.189.90.151': True}, + u'options': {u'allow-query': {u'any': True}, + u'allow-query-cache': {u'10.222.64.0/18': True}, + u'allow-transfer': {u'10.222.64.1': True, + u'canonical-int-ns': True}, + u'also-notify': {u'91.189.89.192': True, + u'91.189.90.151': True}, + u'auth-nxdomain': u'no', + u'directory': u'"/var/cache/bind"', + u'dnssec-validation': u'auto', + u'forwarders': {u'91.189.94.2': True}, + u'listen-on-v6': {u'any': True}, + u'notify': u'explicit', + u'recursion': u'yes'}, + u'zone "."': + {u'file': u'"/etc/bind/db.special"', u'type': u'master'}}, + parsed) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_network.py maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_network.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_network.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_network.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,489 @@ +# Copyright 2014 Canonical Ltd. 
This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for network helpers.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from socket import ( + EAI_BADFLAGS, + EAI_NODATA, + EAI_NONAME, + gaierror, + ) + +from maastesting.factory import factory +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import MAASTestCase +import mock +from netaddr import ( + IPAddress, + IPNetwork, + IPRange, + ) +import netifaces +from netifaces import ( + AF_LINK, + AF_INET, + AF_INET6, + ) +import provisioningserver.utils +from provisioningserver.utils import network as network_module +from provisioningserver.utils.network import ( + clean_up_netifaces_address, + find_ip_via_arp, + find_mac_via_arp, + get_all_addresses_for_interface, + get_all_interface_addresses, + intersect_iprange, + ip_range_within_network, + make_network, + resolve_hostname, + ) +from testtools.matchers import Equals + + +class TestMakeNetwork(MAASTestCase): + + def test_constructs_IPNetwork(self): + network = make_network('10.22.82.0', 24) + self.assertIsInstance(network, IPNetwork) + self.assertEqual(IPNetwork('10.22.82.0/24'), network) + + def test_passes_args_to_IPNetwork(self): + self.patch(network_module, 'IPNetwork') + make_network('10.1.2.0', 24, foo=9) + self.assertEqual( + [mock.call('10.1.2.0/24', foo=9)], + network_module.IPNetwork.mock_calls) + + +class TestFindIPViaARP(MAASTestCase): + + def patch_call(self, output): + """Replace `call_and_check` with one that returns `output`.""" + fake = self.patch(network_module, 'call_and_check') + fake.return_value = output + return fake + + def test__resolves_MAC_address_to_IP(self): + sample = """\ + Address HWtype HWaddress Flags Mask Iface + 192.168.100.20 (incomplete) virbr1 + 192.168.0.104 (incomplete) eth0 + 192.168.0.5 (incomplete) eth0 + 192.168.0.2 (incomplete) 
eth0 + 192.168.0.100 (incomplete) eth0 + 192.168.122.20 ether 52:54:00:02:86:4b C virbr0 + 192.168.0.4 (incomplete) eth0 + 192.168.0.1 ether 90:f6:52:f6:17:92 C eth0 + """ + + call_and_check = self.patch_call(sample) + ip_address_observed = find_ip_via_arp("90:f6:52:f6:17:92") + self.assertThat(call_and_check, MockCalledOnceWith(['arp', '-n'])) + self.assertEqual("192.168.0.1", ip_address_observed) + + def test__returns_consistent_output(self): + mac = factory.make_mac_address() + ips = [ + '10.0.0.11', + '10.0.0.99', + ] + lines = ['%s ether %s C eth0' % (ip, mac) for ip in ips] + self.patch_call('\n'.join(lines)) + one_result = find_ip_via_arp(mac) + self.patch_call('\n'.join(reversed(lines))) + other_result = find_ip_via_arp(mac) + + self.assertIn(one_result, ips) + self.assertEqual(one_result, other_result) + + def test__ignores_case(self): + sample = """\ + 192.168.0.1 ether 90:f6:52:f6:17:92 C eth0 + """ + self.patch_call(sample) + ip_address_observed = find_ip_via_arp("90:f6:52:f6:17:92".upper()) + self.assertEqual("192.168.0.1", ip_address_observed) + + +class TestFindMACViaARP(MAASTestCase): + + def patch_call(self, output): + """Replace `call_and_check` with one that returns `output`.""" + fake = self.patch(provisioningserver.utils.network, 'call_and_check') + fake.return_value = output + return fake + + def make_output_line(self, ip=None, mac=None, dev=None): + """Compose an `ip neigh` output line for given `ip` and `mac`.""" + if ip is None: + ip = factory.make_ipv4_address() + if mac is None: + mac = factory.make_mac_address() + if dev is None: + dev = factory.make_name('eth', sep='') + return "%(ip)s dev %(dev)s lladdr %(mac)s\n" % { + 'ip': ip, + 'dev': dev, + 'mac': mac, + } + + def test__calls_ip_neigh(self): + call_and_check = self.patch_call('') + find_mac_via_arp(factory.make_ipv4_address()) + self.assertThat( + call_and_check, + MockCalledOnceWith(['ip', 'neigh'], env={'LC_ALL': 'C'})) + + def test__works_with_real_call(self): + 
find_mac_via_arp(factory.make_ipv4_address()) + # No error. + pass + + def test__fails_on_nonsensical_output(self): + self.patch_call("Weird output...") + self.assertRaises( + Exception, find_mac_via_arp, factory.make_ipv4_address()) + + def test__returns_None_if_not_found(self): + self.patch_call(self.make_output_line()) + self.assertIsNone(find_mac_via_arp(factory.make_ipv4_address())) + + def test__resolves_IPv4_address_to_MAC(self): + sample = "10.55.60.9 dev eth0 lladdr 3c:41:92:68:2e:00 REACHABLE\n" + self.patch_call(sample) + mac_address_observed = find_mac_via_arp('10.55.60.9') + self.assertEqual('3c:41:92:68:2e:00', mac_address_observed) + + def test__resolves_IPv6_address_to_MAC(self): + sample = ( + "fd10::a76:d7fe:fe93:7cb dev eth0 lladdr 3c:41:92:6b:2e:00 " + "REACHABLE\n") + self.patch_call(sample) + mac_address_observed = find_mac_via_arp('fd10::a76:d7fe:fe93:7cb') + self.assertEqual('3c:41:92:6b:2e:00', mac_address_observed) + + def test__ignores_failed_neighbours(self): + ip = factory.make_ipv4_address() + self.patch_call("%s dev eth0 FAILED\n" % ip) + self.assertIsNone(find_mac_via_arp(ip)) + + def test__is_not_fooled_by_prefixing(self): + self.patch_call(self.make_output_line('10.1.1.10')) + self.assertIsNone(find_mac_via_arp('10.1.1.1')) + self.assertIsNone(find_mac_via_arp('10.1.1.100')) + + def test__is_not_fooled_by_different_notations(self): + mac = factory.make_mac_address() + self.patch_call(self.make_output_line('9::0:05', mac=mac)) + self.assertEqual(mac, find_mac_via_arp('09:0::5')) + + def test__returns_consistent_output(self): + ip = factory.make_ipv4_address() + macs = [ + '52:54:00:02:86:4b', + '90:f6:52:f6:17:92', + ] + lines = [self.make_output_line(ip, mac) for mac in macs] + self.patch_call(''.join(lines)) + one_result = find_mac_via_arp(ip) + self.patch_call(''.join(reversed(lines))) + other_result = find_mac_via_arp(ip) + + self.assertIn(one_result, macs) + self.assertEqual(one_result, other_result) + + +def 
patch_interfaces(testcase, interfaces): + """Patch `netifaces` to show the given `interfaces`. + + :param testcase: The testcase that's doing the patching. + :param interfaces: A dict mapping interface names to `netifaces` + interface entries: dicts with keys like `AF_INET` etc. + """ + # These two netifaces functions map conveniently onto dict methods. + testcase.patch(netifaces, 'interfaces', interfaces.keys) + testcase.patch(netifaces, 'ifaddresses', interfaces.get) + + +class TestGetAllAddressesForInterface(MAASTestCase): + """Tests for `get_all_addresses_for_interface`.""" + + scenarios = [ + ('ipv4', { + 'inet_class': AF_INET, + 'network_factory': factory.make_ipv4_network, + 'ip_address_factory': factory.make_ipv4_address, + 'loopback_address': '127.0.0.1', + }), + ('ipv6', { + 'inet_class': AF_INET6, + 'network_factory': factory.make_ipv6_network, + 'ip_address_factory': factory.make_ipv6_address, + 'loopback_address': '::1', + }), + ] + + def test__returns_address_for_inet_class(self): + ip = self.ip_address_factory() + interface = factory.make_name('eth', sep='') + patch_interfaces( + self, {interface: {self.inet_class: [{'addr': unicode(ip)}]}}) + self.assertEqual( + [ip], list(get_all_addresses_for_interface(interface))) + + def test__ignores_non_address_information(self): + network = self.network_factory() + ip = factory.pick_ip_in_network(network) + interface = factory.make_name('eth', sep='') + patch_interfaces(self, { + interface: { + self.inet_class: [{ + 'addr': unicode(ip), + 'broadcast': unicode(network.broadcast), + 'netmask': unicode(network.netmask), + 'peer': unicode( + factory.pick_ip_in_network(network, but_not=[ip])), + }], + }, + }) + self.assertEqual( + [ip], list(get_all_addresses_for_interface(interface))) + + def test__ignores_link_address(self): + interface = factory.make_name('eth', sep='') + patch_interfaces(self, { + interface: { + AF_LINK: [{ + 'addr': unicode(factory.make_mac_address()), + 'peer': 
unicode(factory.make_mac_address()), + }], + }, + }) + self.assertEqual([], list(get_all_addresses_for_interface(interface))) + + def test__ignores_interface_without_address(self): + network = self.network_factory() + interface = factory.make_name('eth', sep='') + patch_interfaces(self, { + interface: { + self.inet_class: [{ + 'broadcast': unicode(network.broadcast), + 'netmask': unicode(network.netmask), + }], + }, + }) + self.assertEqual([], list(get_all_addresses_for_interface(interface))) + + +class TestGetAllInterfaceAddresses(MAASTestCase): + """Tests for get_all_interface_addresses().""" + + def test__includes_loopback(self): + v4_loopback_address = '127.0.0.1' + v6_loopback_address = '::1' + patch_interfaces(self, { + 'lo': { + AF_INET: [ + {'addr': v4_loopback_address}], + AF_INET6: [ + {'addr': v6_loopback_address}], + }}) + self.assertEqual( + [v4_loopback_address, v6_loopback_address], + list(get_all_interface_addresses())) + + def test_returns_all_addresses_for_all_interfaces(self): + v4_ips = [factory.make_ipv4_address() for _ in range(2)] + v6_ips = [factory.make_ipv6_address() for _ in range(2)] + ips = zip(v4_ips, v6_ips) + interfaces = { + factory.make_name('eth', sep=''): { + AF_INET: [{'addr': unicode(ipv4)}], + AF_INET6: [{'addr': unicode(ipv6)}], + } + for ipv4, ipv6 in ips + } + patch_interfaces(self, interfaces) + self.assertItemsEqual( + v4_ips + v6_ips, + get_all_interface_addresses()) + + +class TestGetAllInterfaceAddressesWithMultipleClasses(MAASTestCase): + """Tests for get_all_interface_addresses() with multiple inet classes.""" + + def patch_interfaces(self, interfaces): + """Patch `netifaces` to show the given `interfaces`. + + :param interfaces: A dict mapping interface names to `netifaces` + interface entries: dicts with keys like `AF_INET` etc. + """ + # These two netifaces functions map conveniently onto dict methods. 
+ self.patch(netifaces, 'interfaces', interfaces.keys) + self.patch(netifaces, 'ifaddresses', interfaces.get) + + def test_returns_all_addresses_for_interface(self): + v4_ip = factory.make_ipv4_address() + v6_ip = factory.make_ipv6_address() + interface = factory.make_name('eth', sep='') + patch_interfaces(self, { + interface: { + AF_INET: [ + {'addr': unicode(v4_ip)}], + AF_INET6: [ + {'addr': unicode(v6_ip)}], + } + }) + self.assertEqual([v4_ip, v6_ip], list(get_all_interface_addresses())) + + +class TestCleanUpNetifacesAddress(MAASTestCase): + """Tests for `clean_up_netifaces_address`.""" + + def test__leaves_IPv4_intact(self): + ip = unicode(factory.make_ipv4_address()) + interface = factory.make_name('eth') + self.assertEqual(ip, clean_up_netifaces_address(ip, interface)) + + def test__leaves_clean_IPv6_intact(self): + ip = unicode(factory.make_ipv6_address()) + interface = factory.make_name('eth') + self.assertEqual(ip, clean_up_netifaces_address(ip, interface)) + + def test__removes_zone_index_suffix(self): + ip = unicode(factory.make_ipv6_address()) + interface = factory.make_name('eth') + self.assertEqual( + ip, + clean_up_netifaces_address('%s%%%s' % (ip, interface), interface)) + + +class TestResolveHostname(MAASTestCase): + """Tests for `resolve_hostname`.""" + + def patch_getaddrinfo(self, *addrs): + fake = self.patch(network_module, 'getaddrinfo') + fake.return_value = [ + (None, None, None, None, (unicode(address), None)) + for address in addrs + ] + return fake + + def patch_getaddrinfo_fail(self, exception): + fake = self.patch(network_module, 'getaddrinfo') + fake.side_effect = exception + return fake + + def test__rejects_weird_IP_version(self): + self.assertRaises( + AssertionError, + resolve_hostname, factory.make_hostname(), ip_version=5) + + def test__integrates_with_getaddrinfo(self): + result = resolve_hostname('localhost', 4) + self.assertIsInstance(result, set) + [localhost] = result + self.assertIsInstance(localhost, IPAddress) + 
self.assertIn(localhost, IPNetwork('127.0.0.0/8')) + + def test__resolves_IPv4_address(self): + ip = factory.make_ipv4_address() + fake = self.patch_getaddrinfo(ip) + hostname = factory.make_hostname() + result = resolve_hostname(hostname, 4) + self.assertIsInstance(result, set) + self.assertEqual({IPAddress(ip)}, result) + self.assertThat(fake, MockCalledOnceWith(hostname, mock.ANY, AF_INET)) + + def test__resolves_IPv6_address(self): + ip = factory.make_ipv6_address() + fake = self.patch_getaddrinfo(ip) + hostname = factory.make_hostname() + result = resolve_hostname(hostname, 6) + self.assertIsInstance(result, set) + self.assertEqual({IPAddress(ip)}, result) + self.assertThat(fake, MockCalledOnceWith(hostname, mock.ANY, AF_INET6)) + + def test__returns_empty_if_address_does_not_resolve(self): + self.patch_getaddrinfo_fail( + gaierror(EAI_NONAME, "Name or service not known")) + self.assertEqual(set(), resolve_hostname(factory.make_hostname(), 4)) + + def test__returns_empty_if_address_resolves_to_no_data(self): + self.patch_getaddrinfo_fail( + gaierror(EAI_NODATA, "No data returned")) + self.assertEqual(set(), resolve_hostname(factory.make_hostname(), 4)) + + def test__propagates_other_gaierrors(self): + self.patch_getaddrinfo_fail(gaierror(EAI_BADFLAGS, "Bad parameters")) + self.assertRaises( + gaierror, + resolve_hostname, factory.make_hostname(), 4) + + def test__propagates_unexpected_errors(self): + self.patch_getaddrinfo_fail(KeyError("Huh what?")) + self.assertRaises( + KeyError, + resolve_hostname, factory.make_hostname(), 4) + + +class TestIntersectIPRange(MAASTestCase): + """Tests for `intersect_iprange()`.""" + + def test_finds_intersection_between_two_ranges(self): + range_1 = IPRange('10.0.0.1', '10.0.0.255') + range_2 = IPRange('10.0.0.128', '10.0.0.200') + intersect = intersect_iprange(range_1, range_2) + self.expectThat( + IPAddress(intersect.first), Equals(IPAddress('10.0.0.128'))) + self.expectThat( + IPAddress(intersect.last), 
Equals(IPAddress('10.0.0.200'))) + + def test_ignores_non_intersecting_ranges(self): + range_1 = IPRange('10.0.0.1', '10.0.0.255') + range_2 = IPRange('10.0.1.128', '10.0.1.200') + self.assertIsNone(intersect_iprange(range_1, range_2)) + + def test_finds_partial_intersection(self): + range_1 = IPRange('10.0.0.1', '10.0.0.128') + range_2 = IPRange('10.0.0.64', '10.0.0.200') + intersect = intersect_iprange(range_1, range_2) + self.expectThat( + IPAddress(intersect.first), Equals(IPAddress('10.0.0.64'))) + self.expectThat( + IPAddress(intersect.last), Equals(IPAddress('10.0.0.128'))) + + +class TestIPRangeWithinNetwork(MAASTestCase): + + def test_returns_true_when_ip_range_is_within_network(self): + ip_range = IPRange('10.0.0.55', '10.0.255.55') + ip_network = IPNetwork('10.0.0.0/16') + self.assertTrue(ip_range_within_network(ip_range, ip_network)) + + def test_returns_false_when_ip_range_is_within_network(self): + ip_range = IPRange('192.0.0.55', '192.0.255.55') + ip_network = IPNetwork('10.0.0.0/16') + self.assertFalse(ip_range_within_network(ip_range, ip_network)) + + def test_returns_false_when_ip_range_is_partially_within_network(self): + ip_range = IPRange('10.0.0.55', '10.1.0.55') + ip_network = IPNetwork('10.0.0.0/16') + self.assertFalse(ip_range_within_network(ip_range, ip_network)) + + def test_works_with_two_ip_networks(self): + network_1 = IPNetwork('10.0.0.0/16') + network_2 = IPNetwork('10.0.0.0/24') + self.assertTrue(ip_range_within_network(network_2, network_1)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_registry.py maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_registry.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_registry.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_registry.py 2015-07-10 01:27:14.000000000 +0000 @@ -42,7 +42,7 @@ def test_is_singleton_over_multiple_imports(self): Registry.register_item("resource1", sentinel.resource1) 
- from provisioningserver.driver import Registry as Registry2 + from provisioningserver.drivers import Registry as Registry2 Registry2.register_item("resource2", sentinel.resource2) self.assertItemsEqual( [("resource1", sentinel.resource1), diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_script.py maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_script.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_script.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_script.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,275 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for the subcommand utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from argparse import ( + ArgumentParser, + Namespace, + ) +import os +from random import randint +import stat +import StringIO +from subprocess import ( + CalledProcessError, + PIPE, + Popen, + ) +import sys +import types + +from maastesting import bindir +from maastesting.factory import factory +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import MAASTestCase +import provisioningserver.utils +from provisioningserver.utils.script import ( + ActionScript, + AtomicWriteScript, + MainScript, + ) +from testtools.matchers import ( + FileContains, + MatchesStructure, + ) + + +class TestActionScript(MAASTestCase): + """Test `ActionScript`.""" + + factory = ActionScript + + def setUp(self): + super(TestActionScript, self).setUp() + # ActionScript.setup() is not safe to run in the test suite. + self.patch(ActionScript, "setup", lambda self: None) + # ArgumentParser sometimes likes to print to stdout/err. Use + # StringIO.StringIO to be relaxed about bytes/unicode (argparse uses + # bytes). 
When moving to Python 3 this will need to be tightened up. + self.patch(sys, "stdout", StringIO.StringIO()) + self.patch(sys, "stderr", StringIO.StringIO()) + + def test_init(self): + description = factory.make_string() + script = self.factory(description) + self.assertIsInstance(script.parser, ArgumentParser) + self.assertEqual(description, script.parser.description) + + def test_register(self): + handler = types.ModuleType(b"handler") + handler.add_arguments = lambda parser: ( + self.assertIsInstance(parser, ArgumentParser)) + handler.run = lambda args: ( + self.assertIsInstance(args, int)) + script = self.factory("Description") + script.register("slay", handler) + self.assertIn("slay", script.subparsers.choices) + action_parser = script.subparsers.choices["slay"] + self.assertIsInstance(action_parser, ArgumentParser) + + def test_register_without_add_arguments(self): + # ActionScript.register will crash if the handler has no + # add_arguments() callable. + handler = types.ModuleType(b"handler") + handler.run = lambda args: None + script = self.factory("Description") + error = self.assertRaises( + AttributeError, script.register, "decapitate", handler) + self.assertIn("'add_arguments'", "%s" % error) + + def test_register_without_run(self): + # ActionScript.register will crash if the handler has no run() + # callable. 
+ handler = types.ModuleType(b"handler") + handler.add_arguments = lambda parser: None + script = self.factory("Description") + error = self.assertRaises( + AttributeError, script.register, "decapitate", handler) + self.assertIn("'run'", "%s" % error) + + def test_call(self): + handler_calls = [] + handler = types.ModuleType(b"handler") + handler.add_arguments = lambda parser: None + handler.run = handler_calls.append + script = self.factory("Description") + script.register("amputate", handler) + error = self.assertRaises(SystemExit, script, ["amputate"]) + self.assertEqual(0, error.code) + self.assertEqual(1, len(handler_calls)) + self.assertIsInstance(handler_calls[0], Namespace) + + def test_call_invalid_choice(self): + script = self.factory("Description") + self.assertRaises(SystemExit, script, ["disembowel"]) + self.assertIn(b"invalid choice", sys.stderr.getvalue()) + + def test_call_with_exception(self): + # Most exceptions from run() are propagated. + handler = types.ModuleType(b"handler") + handler.add_arguments = lambda parser: None + handler.run = lambda args: 0 / 0 + script = self.factory("Description") + script.register("eviscerate", handler) + self.assertRaises(ZeroDivisionError, script, ["eviscerate"]) + + def test_call_with_process_exception(self): + # CalledProcessError is converted into SystemExit. + exception = CalledProcessError( + randint(0, 256), [factory.make_string()], + factory.make_string().encode("ascii")) + + def raise_exception(): + raise exception + + handler = types.ModuleType(b"handler") + handler.add_arguments = lambda parser: None + handler.run = lambda args: raise_exception() + script = self.factory("Description") + script.register("sever", handler) + error = self.assertRaises(SystemExit, script, ["sever"]) + self.assertEqual(exception.returncode, error.code) + + def test_call_with_keyboard_interrupt(self): + # KeyboardInterrupt is silently converted into SystemExit, with an + # exit code of 1. 
+ + def raise_exception(): + raise KeyboardInterrupt() + + handler = types.ModuleType(b"handler") + handler.add_arguments = lambda parser: None + handler.run = lambda args: raise_exception() + script = self.factory("Description") + script.register("smash", handler) + error = self.assertRaises(SystemExit, script, ["smash"]) + self.assertEqual(1, error.code) + + +class TestMainScript(TestActionScript): + + factory = MainScript + + def test_default_arguments(self): + # MainScript accepts a --config-file parameter. The value of this is + # passed through into the args namespace object as config_file. + handler_calls = [] + handler = types.ModuleType(b"handler") + handler.add_arguments = lambda parser: None + handler.run = handler_calls.append + script = self.factory("Description") + script.register("dislocate", handler) + dummy_config_file = factory.make_name("config-file") + # --config-file is specified before the action. + args = ["--config-file", dummy_config_file, "dislocate"] + error = self.assertRaises(SystemExit, script, args) + self.assertEqual(0, error.code) + namespace = handler_calls[0] + self.assertEqual( + {"config_file": dummy_config_file, "handler": handler}, + vars(namespace)) + + +class TestAtomicWriteScript(MAASTestCase): + + def setUp(self): + super(TestAtomicWriteScript, self).setUp() + # Silence ArgumentParser. 
+ self.patch(sys, "stdout", StringIO.StringIO()) + self.patch(sys, "stderr", StringIO.StringIO()) + + def get_parser(self): + parser = ArgumentParser() + AtomicWriteScript.add_arguments(parser) + return parser + + def get_and_run_mocked_script(self, content, filename, *args): + self.patch(sys, "stdin", StringIO.StringIO(content)) + parser = self.get_parser() + parsed_args = parser.parse_args(*args) + mocked_atomic_write = self.patch( + provisioningserver.utils.script, 'atomic_write') + AtomicWriteScript.run(parsed_args) + return mocked_atomic_write + + def test_arg_setup(self): + parser = self.get_parser() + filename = factory.make_string() + args = parser.parse_args(( + '--no-overwrite', + '--filename', filename, + '--mode', "111")) + self.assertThat( + args, MatchesStructure.byEquality( + no_overwrite=True, + filename=filename, + mode="111")) + + def test_filename_arg_required(self): + parser = self.get_parser() + self.assertRaises(SystemExit, parser.parse_args, ('--no-overwrite',)) + + def test_no_overwrite_defaults_to_false(self): + parser = self.get_parser() + filename = factory.make_string() + args = parser.parse_args(('--filename', filename)) + self.assertFalse(args.no_overwrite) + + def test_script_executable(self): + content = factory.make_string() + script = [os.path.join(bindir, "maas-provision"), 'atomic-write'] + target_file = self.make_file() + script.extend(('--filename', target_file, '--mode', '615')) + cmd = Popen( + script, stdin=PIPE, + env=dict(PYTHONPATH=":".join(sys.path))) + cmd.communicate(content) + self.assertThat(target_file, FileContains(content)) + self.assertEqual(0615, stat.S_IMODE(os.stat(target_file).st_mode)) + + def test_passes_overwrite_flag(self): + content = factory.make_string() + filename = factory.make_string() + mocked_atomic_write = self.get_and_run_mocked_script( + content, filename, + ('--filename', filename, '--no-overwrite')) + + self.assertThat( + mocked_atomic_write, + MockCalledOnceWith(content, filename, mode=0600, 
overwrite=False)) + + def test_passes_mode_flag(self): + content = factory.make_string() + filename = factory.make_string() + # Mode that's unlikely to occur in the wild. + mode = 0377 + mocked_atomic_write = self.get_and_run_mocked_script( + content, filename, + ('--filename', filename, '--mode', oct(mode))) + + self.assertThat( + mocked_atomic_write, + MockCalledOnceWith(content, filename, mode=mode, overwrite=True)) + + def test_default_mode(self): + content = factory.make_string() + filename = factory.make_string() + mocked_atomic_write = self.get_and_run_mocked_script( + content, filename, + ('--filename', filename)) + + self.assertThat( + mocked_atomic_write, + MockCalledOnceWith(content, filename, mode=0600, overwrite=True)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_shell.py maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_shell.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_shell.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_shell.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,280 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for utilities to execute external commands.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import os +from random import randint +import re +import signal +from subprocess import CalledProcessError +import time + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from provisioningserver.utils.shell import ( + call_and_check, + ExternalProcessError, + objectfork, + pipefork, + PipeForkError, + ) +import provisioningserver.utils.shell as shell_module +from testtools import ExpectedException +from testtools.matchers import ( + Is, + IsInstance, + Not, + ) + + +class TestCallAndCheck(MAASTestCase): + """Tests `call_and_check`.""" + + def patch_popen(self, returncode=0, stderr=''): + """Replace `subprocess.Popen` with a mock.""" + popen = self.patch(shell_module, 'Popen') + process = popen.return_value + process.communicate.return_value = (None, stderr) + process.returncode = returncode + return process + + def test__returns_standard_output(self): + output = factory.make_string() + self.assertEqual(output, call_and_check(['/bin/echo', '-n', output])) + + def test__raises_ExternalProcessError_on_failure(self): + command = factory.make_name('command') + message = factory.make_string() + self.patch_popen(returncode=1, stderr=message) + error = self.assertRaises( + ExternalProcessError, call_and_check, command) + self.assertEqual(1, error.returncode) + self.assertEqual(command, error.cmd) + self.assertEqual(message, error.output) + + def test__reports_stderr_on_failure(self): + nonfile = os.path.join(self.make_dir(), factory.make_name('nonesuch')) + error = self.assertRaises( + ExternalProcessError, + call_and_check, ['/bin/cat', nonfile], env={'LC_ALL': 'C'}) + self.assertEqual( + "/bin/cat: %s: No such file or directory" % nonfile, + error.output) + + +class TestExternalProcessError(MAASTestCase): + """Tests for the 
ExternalProcessError class.""" + + def test_upgrade_upgrades_CalledProcessError(self): + error = factory.make_CalledProcessError() + self.expectThat(error, Not(IsInstance(ExternalProcessError))) + ExternalProcessError.upgrade(error) + self.expectThat(error, IsInstance(ExternalProcessError)) + + def test_upgrade_does_not_change_CalledProcessError_subclasses(self): + error_type = factory.make_exception_type(bases=(CalledProcessError,)) + error = factory.make_CalledProcessError() + error.__class__ = error_type # Change the class. + self.expectThat(error, Not(IsInstance(ExternalProcessError))) + ExternalProcessError.upgrade(error) + self.expectThat(error, Not(IsInstance(ExternalProcessError))) + self.expectThat(error.__class__, Is(error_type)) + + def test_upgrade_does_not_change_other_errors(self): + error_type = factory.make_exception_type() + error = error_type() + self.expectThat(error, Not(IsInstance(ExternalProcessError))) + ExternalProcessError.upgrade(error) + self.expectThat(error, Not(IsInstance(ExternalProcessError))) + self.expectThat(error.__class__, Is(error_type)) + + def test_upgrade_returns_None(self): + self.expectThat( + ExternalProcessError.upgrade(factory.make_exception()), + Is(None)) + + def test_to_unicode_decodes_to_unicode(self): + # Byte strings are decoded as ASCII by _to_unicode(), replacing + # all non-ASCII characters with U+FFFD REPLACEMENT CHARACTERs. + byte_string = b"This string will be converted. \xe5\xb2\x81\xe5." + expected_unicode_string = ( + u"This string will be converted. \ufffd\ufffd\ufffd\ufffd.") + converted_string = ExternalProcessError._to_unicode(byte_string) + self.assertIsInstance(converted_string, unicode) + self.assertEqual(expected_unicode_string, converted_string) + + def test_to_unicode_defers_to_unicode_constructor(self): + # Unicode strings and non-byte strings are handed to unicode() + # to undergo Python's normal coercion strategy. 
(For unicode + # strings this is actually a no-op, but it's cheaper to do this + # than special-case unicode strings.) + self.assertEqual( + unicode(self), ExternalProcessError._to_unicode(self)) + + def test_to_ascii_encodes_to_bytes(self): + # Yes, this is how you really spell "smorgasbord." Look it up. + unicode_string = u"Sm\xf6rg\xe5sbord" + expected_byte_string = b"Sm?rg?sbord" + converted_string = ExternalProcessError._to_ascii(unicode_string) + self.assertIsInstance(converted_string, bytes) + self.assertEqual(expected_byte_string, converted_string) + + def test_to_ascii_defers_to_bytes(self): + # Byte strings and non-unicode strings are handed to bytes() to + # undergo Python's normal coercion strategy. (For byte strings + # this is actually a no-op, but it's cheaper to do this than + # special-case byte strings.) + self.assertEqual(bytes(self), ExternalProcessError._to_ascii(self)) + + def test_to_ascii_removes_non_printable_chars(self): + # After conversion to a byte string, all non-printable and + # non-ASCII characters are replaced with question marks. + byte_string = b"*How* many roads\x01\x02\xb2\xfe" + expected_byte_string = b"*How* many roads????" 
+ converted_string = ExternalProcessError._to_ascii(byte_string) + self.assertIsInstance(converted_string, bytes) + self.assertEqual(expected_byte_string, converted_string) + + def test__str__returns_bytes(self): + error = ExternalProcessError(returncode=-1, cmd="foo-bar") + self.assertIsInstance(error.__str__(), bytes) + + def test__unicode__returns_unicode(self): + error = ExternalProcessError(returncode=-1, cmd="foo-bar") + self.assertIsInstance(error.__unicode__(), unicode) + + def test__str__contains_output(self): + output = b"Joyeux No\xebl" + ascii_output = "Joyeux No?l" + error = ExternalProcessError( + returncode=-1, cmd="foo-bar", output=output) + self.assertIn(ascii_output, error.__str__()) + + def test__unicode__contains_output(self): + output = b"Mot\xf6rhead" + unicode_output = "Mot\ufffdrhead" + error = ExternalProcessError( + returncode=-1, cmd="foo-bar", output=output) + self.assertIn(unicode_output, error.__unicode__()) + + def test_output_as_ascii(self): + output = b"Joyeux No\xebl" + ascii_output = "Joyeux No?l" + error = ExternalProcessError( + returncode=-1, cmd="foo-bar", output=output) + self.assertEqual(ascii_output, error.output_as_ascii) + + def test_output_as_unicode(self): + output = b"Mot\xf6rhead" + unicode_output = "Mot\ufffdrhead" + error = ExternalProcessError( + returncode=-1, cmd="foo-bar", output=output) + self.assertEqual(unicode_output, error.output_as_unicode) + + +class TestPipeFork(MAASTestCase): + + def test__forks(self): + with pipefork() as (pid, fin, fout): + if pid == 0: + # Child. + message_in = fin.read() + message_out = b"Hello %s!" % message_in + fout.write(message_out) + fout.close() + else: + # Parent. + message_out = factory.make_name("Parent").encode("ascii") + fout.write(message_out) + fout.close() + message_in = fin.read() + self.assertEqual(b"Hello %s!" 
% message_out, message_in) + + def test__raises_childs_exception_when_child_crashes(self): + # If the child process exits with an exception, it is passed back to + # the parent via a pickled t.p.failure.Failure, and re-raised. + with ExpectedException(ZeroDivisionError): + with pipefork() as (pid, fin, fout): + if pid == 0: + # Child. + raise ZeroDivisionError() + + def test__raises_exception_when_child_killed_by_signal(self): + expected_message = re.escape("Child killed by signal 15 (SIGTERM)") + with ExpectedException(PipeForkError, expected_message): + with pipefork() as (pid, fin, fout): + if pid == 0: + # Close `fout` to signal to parent that we're running. + fout.close() + time.sleep(10) + else: + # Wait for child to close its `fout` before signalling. + fin.read() + os.kill(pid, signal.SIGTERM) + + def test__raises_exception_when_child_exits_with_non_zero_code(self): + exit_code = randint(1, 99) + expected_message = re.escape("Child exited with code %s" % exit_code) + with ExpectedException(PipeForkError, expected_message): + with pipefork() as (pid, fin, fout): + if pid == 0: + os._exit(exit_code) + + def test__SystemExit_in_child_is_not_raised_in_parent(self): + # All exceptions are pickled and passed back to the parent process, + # except for SystemExit. It instead results in a call to os._exit(). + exit_code = randint(1, 99) + expected_message = re.escape("Child exited with code %s" % exit_code) + with ExpectedException(PipeForkError, expected_message): + with pipefork() as (pid, fin, fout): + if pid == 0: + raise SystemExit(exit_code) + + +class TestObjectFork(MAASTestCase): + + def test__can_send_and_receive_objects(self): + + def child(recv, send): + # Sum numbers until we get None through. + for numbers in iter(recv, None): + send(sum(numbers)) + # Now echo things until we get None. + for things in iter(recv, None): + send(things) + + def parent(recv, send): + # Send numbers to the child first. 
+ for _ in xrange(randint(3, 10)): + numbers = list(randint(1, 100) for _ in xrange(10)) + send(numbers) + self.assertEqual(sum(numbers), recv()) + # Signal that we're done with numbers. + send(None) + # Send some other things and see that they come back. + picklable_things = { + "foo": [randint(1, 1000) for _ in xrange(10)], + (1, 2, b"three", 4.0): {self.__class__, "bar"}, + } + send(picklable_things) + self.assertEqual(picklable_things, recv()) + # Signal that we're done again. + send(None) + + with objectfork() as (pid, recv, send): + if pid == 0: + child(recv, send) + else: + parent(recv, send) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_text.py maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_text.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_text.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_text.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,79 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for text processing utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from textwrap import dedent + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from provisioningserver.utils.text import ( + make_bullet_list, + normalise_whitespace, + ) + + +class TestNormaliseWhitespace(MAASTestCase): + + def test__preserves_text_without_whitespace(self): + word = factory.make_name('word') + self.assertEqual(word, normalise_whitespace(word)) + + def test__eliminates_leading_space(self): + self.assertEqual('word', normalise_whitespace(' word')) + + def test__eliminates_trailing_space(self): + self.assertEqual('word', normalise_whitespace('word ')) + + def test__replaces_any_whitespace_sequence_with_single_space(self): + self.assertEqual( + 'one two three', + normalise_whitespace('one two\t\nthree')) + + def test__treats_punctuation_as_non_space(self): + punctuation = '.?;:!' + self.assertEqual(punctuation, normalise_whitespace(punctuation)) + + +class TestMakeBulletList(MAASTestCase): + + def test__returns_empty_string_when_there_are_no_messages(self): + self.assertEqual("", make_bullet_list([])) + + def test__wraps_at_72_columns(self): + lines = make_bullet_list([" -" * 50]).splitlines() + self.assertEqual(72, max(len(line) for line in lines)) + + def test__fills_and_formats(self): + messages = [ + """Lorem ipsum dolor sit amet, consectetur adipiscing elit. + Maecenas a lorem pellentesque, dapibus lorem ut, blandit ex.""", + """Nulla tristique quam sed suscipit cursus""", + """Integer euismod viverra ipsum, id placerat ante interdum vitae. + Mauris fermentum ut nisi vitae tincidunt. Maecenas posuere lacus + vel est dignissim vehicula. Vestibulum tristique, massa non + facilisis mattis, nisi lacus lacinia neque, nec convallis risus + turpis id metus. 
Aenean semper sapien sed volutpat volutpat.""", + ] + bullet_list = make_bullet_list(messages) + bullet_list_expected = dedent("""\ + * Lorem ipsum dolor sit amet, consectetur adipiscing elit. + Maecenas a lorem pellentesque, dapibus lorem ut, blandit ex. + * Nulla tristique quam sed suscipit cursus + * Integer euismod viverra ipsum, id placerat ante interdum vitae. + Mauris fermentum ut nisi vitae tincidunt. Maecenas posuere lacus + vel est dignissim vehicula. Vestibulum tristique, massa non + facilisis mattis, nisi lacus lacinia neque, nec convallis risus + turpis id metus. Aenean semper sapien sed volutpat volutpat.""") + self.assertEqual(bullet_list_expected, bullet_list) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_twisted.py maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_twisted.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_twisted.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_twisted.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,777 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +"""Tests for Twisted/Crochet-related utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import operator +from random import ( + randint, + random, + ) +import re +import time + +from crochet import EventualResult +from maastesting.factory import factory +from maastesting.matchers import ( + IsCallable, + IsUnfiredDeferred, + MockCalledOnceWith, + ) +from maastesting.testcase import ( + MAASTestCase, + MAASTwistedRunTest, + ) +from mock import ( + Mock, + sentinel, + ) +from provisioningserver.utils import twisted as twisted_module +from provisioningserver.utils.twisted import ( + asynchronous, + callOut, + DeferredValue, + deferWithTimeout, + FOREVER, + pause, + reactor_sync, + retries, + synchronous, + ) +from testtools.deferredruntest import extract_result +from testtools.matchers import ( + AfterPreprocessing, + Equals, + HasLength, + Is, + IsInstance, + MatchesAll, + MatchesException, + MatchesListwise, + MatchesStructure, + Not, + Raises, + ) +from testtools.testcase import ExpectedException +from twisted.internet import reactor +from twisted.internet.defer import ( + AlreadyCalledError, + CancelledError, + Deferred, + inlineCallbacks, + ) +from twisted.internet.task import Clock +from twisted.internet.threads import deferToThread +from twisted.python import threadable +from twisted.python.failure import Failure + + +def return_args(*args, **kwargs): + return args, kwargs + + +class TestAsynchronousDecorator(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test_in_reactor_thread(self): + result = asynchronous(return_args)(1, 2, three=3) + self.assertEqual(((1, 2), {"three": 3}), result) + + @inlineCallbacks + def test_in_other_thread(self): + def do_stuff_in_thread(): + result = asynchronous(return_args)(3, 4, five=5) + self.assertThat(result, IsInstance(EventualResult)) + return result.wait() + # Call 
do_stuff_in_thread() from another thread. + result = yield deferToThread(do_stuff_in_thread) + # do_stuff_in_thread() waited for the result of return_args(). + # The arguments passed back match those passed in from + # do_stuff_in_thread(). + self.assertEqual(((3, 4), {"five": 5}), result) + + +noop = lambda: None + + +class TestAsynchronousDecoratorWithTimeout(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test_timeout_cannot_be_None(self): + self.assertRaises(ValueError, asynchronous, noop, timeout=None) + + def test_timeout_cannot_be_negative(self): + self.assertRaises(ValueError, asynchronous, noop, timeout=-1) + + def test_timeout_can_be_int(self): + self.assertThat(asynchronous(noop, timeout=1), IsCallable()) + + def test_timeout_can_be_long(self): + self.assertThat(asynchronous(noop, timeout=1L), IsCallable()) + + def test_timeout_can_be_float(self): + self.assertThat(asynchronous(noop, timeout=1.0), IsCallable()) + + def test_timeout_can_be_forever(self): + self.assertThat(asynchronous(noop, timeout=FOREVER), IsCallable()) + + +class TestAsynchronousDecoratorWithTimeoutDefined(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + scenarios = ( + ("finite", {"timeout": random()}), + ("forever", {"timeout": FOREVER}), + ) + + def test_in_reactor_thread(self): + return_args_async = asynchronous(return_args, self.timeout) + result = return_args_async(1, 2, three=3) + self.assertEqual(((1, 2), {"three": 3}), result) + + @inlineCallbacks + def test_in_other_thread(self): + return_args_async = asynchronous(return_args, self.timeout) + # Call self.return_args from another thread. + result = yield deferToThread(return_args_async, 3, 4, five=5) + # The arguments passed back match those passed in. + self.assertEqual(((3, 4), {"five": 5}), result) + + @inlineCallbacks + def test__passes_timeout_to_wait(self): + # These mocks are going to help us tell a story of a timeout. 
+ run_in_reactor = self.patch(twisted_module, "run_in_reactor") + func_in_reactor = run_in_reactor.return_value + eventual_result = func_in_reactor.return_value + wait = eventual_result.wait + wait.return_value = sentinel.result + + # Our placeholder function, and its wrapped version. + do_nothing = lambda: None + do_nothing_async = asynchronous(do_nothing, timeout=self.timeout) + + # Call our wrapped function in a thread so that the wrapper calls back + # into the IO thread, via the time-out logic. + result = yield deferToThread(do_nothing_async) + self.expectThat(result, Equals(sentinel.result)) + + # Here's what happened, or should have: + # 1. do_nothing was wrapped by run_in_reactor, producing + # func_in_reactor. + self.assertThat(run_in_reactor, MockCalledOnceWith(do_nothing)) + # 2. func_in_reactor was called with no arguments, because we didn't + # pass any, producing eventual_result. + self.assertThat(func_in_reactor, MockCalledOnceWith()) + # 3. eventual_result.wait was called... + if self.timeout is FOREVER: + # ...without arguments. + self.assertThat(wait, MockCalledOnceWith()) + else: + # ...with the timeout we passed when we wrapped do_nothing. + self.assertThat(wait, MockCalledOnceWith(self.timeout)) + + +class TestSynchronousDecorator(MAASTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + @synchronous + def return_args(self, *args, **kwargs): + return args, kwargs + + def test_in_reactor_thread(self): + expected = MatchesException( + AssertionError, re.escape( + "Function return_args(...) must not be called " + "in the reactor thread.")) + self.assertThat(self.return_args, Raises(expected)) + + @inlineCallbacks + def test_in_other_thread(self): + def do_stuff_in_thread(): + return self.return_args(3, 4, five=5) + # Call do_stuff_in_thread() from another thread. + result = yield deferToThread(do_stuff_in_thread) + # do_stuff_in_thread() ran straight through, without + # modification. 
The arguments passed back match those passed in + # from do_stuff_in_thread(). + self.assertEqual(((3, 4), {"five": 5}), result) + + def test_allows_call_in_any_thread_when_reactor_not_running(self): + self.patch(reactor, "running", False) + self.assertEqual(((3, 4), {"five": 5}), self.return_args(3, 4, five=5)) + + +class TestReactorSync(MAASTestCase): + """Tests for `reactor_sync`.""" + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def test__does_what_it_claims(self): + whence = [] + + def record_whence_while_in_sync_with_reactor(): + # Sync up with the reactor three times. This increases the chance + # that something unexpected could happen, thus breaking the test. + # The hope is, naturally, that nothing breaks. It also means we + # can see the reactor spinning in between; see the callLater() to + # see how we measure this. + for _ in xrange(3): + with reactor_sync(): + # Schedule a call that the reactor will make when we + # release sync with it. + reactor.callLater(0, whence.append, "reactor") + # Spin a bit to demonstrate that the reactor doesn't run + # while we're in the reactor_sync context. + for _ in xrange(10): + whence.append("thread") + # Sleep for a moment to allow other threads - like the + # reactor's thread - a chance to run. Our bet is that + # the reactor's thread _won't_ run because we're + # synchronised with it. + time.sleep(0.01) + + def check(_): + self.assertEqual( + (["thread"] * 10) + ["reactor"] + + (["thread"] * 10) + ["reactor"] + + (["thread"] * 10) + ["reactor"], + whence) + + d = deferToThread(record_whence_while_in_sync_with_reactor) + d.addCallback(check) + return d + + def test__updates_io_thread(self): + # We're in the reactor thread right now. + self.assertTrue(threadable.isInIOThread()) + reactorThread = threadable.ioThread + + # The rest of this test runs in a separate thread. + def in_thread(): + thisThread = threadable.getThreadID() + # This is definitely not the reactor thread. 
+ self.assertNotEqual(thisThread, reactorThread) + # The IO thread is still the reactor thread. + self.assertEqual(reactorThread, threadable.ioThread) + self.assertFalse(threadable.isInIOThread()) + # When we sync with the reactor the current thread is marked + # as the IO thread. + with reactor_sync(): + self.assertEqual(thisThread, threadable.ioThread) + self.assertTrue(threadable.isInIOThread()) + # When sync is released the IO thread reverts to the + # reactor's thread. + self.assertEqual(reactorThread, threadable.ioThread) + self.assertFalse(threadable.isInIOThread()) + + return deferToThread(in_thread) + + def test__releases_sync_on_error(self): + + def in_thread(): + with reactor_sync(): + raise RuntimeError("Boom") + + def check(failure): + failure.trap(RuntimeError) + + # The test is that this completes; if sync with the reactor + # thread is not released then this will deadlock. + return deferToThread(in_thread).addCallbacks(self.fail, check) + + def test__restores_io_thread_on_error(self): + # We're in the reactor thread right now. 
+ self.assertTrue(threadable.isInIOThread()) + reactorThread = threadable.ioThread + + def in_thread(): + with reactor_sync(): + raise RuntimeError("Boom") + + def check(failure): + failure.trap(RuntimeError) + self.assertEqual(reactorThread, threadable.ioThread) + self.assertTrue(threadable.isInIOThread()) + + return deferToThread(in_thread).addCallbacks(self.fail, check) + + def test__does_nothing_in_the_reactor_thread(self): + self.assertTrue(threadable.isInIOThread()) + with reactor_sync(): + self.assertTrue(threadable.isInIOThread()) + self.assertTrue(threadable.isInIOThread()) + + def test__does_nothing_in_the_reactor_thread_on_error(self): + self.assertTrue(threadable.isInIOThread()) + with ExpectedException(RuntimeError): + with reactor_sync(): + self.assertTrue(threadable.isInIOThread()) + raise RuntimeError("I sneezed") + self.assertTrue(threadable.isInIOThread()) + + +class TestRetries(MAASTestCase): + + def assertRetry( + self, clock, observed, expected_elapsed, expected_remaining, + expected_wait): + """Assert that the retry tuple matches the given expectations. + + Retry tuples are those returned by `retries`. + """ + self.assertThat(observed, MatchesListwise([ + Equals(expected_elapsed), # elapsed + Equals(expected_remaining), # remaining + Equals(expected_wait), # wait + ])) + + def test_yields_elapsed_remaining_and_wait(self): + # Take control of time. + clock = Clock() + + gen_retries = retries(5, 2, clock=clock) + # No time has passed, 5 seconds remain, and it suggests sleeping + # for 2 seconds. + self.assertRetry(clock, next(gen_retries), 0, 5, 2) + # Mimic sleeping for the suggested sleep time. + clock.advance(2) + # Now 2 seconds have passed, 3 seconds remain, and it suggests + # sleeping for 2 more seconds. + self.assertRetry(clock, next(gen_retries), 2, 3, 2) + # Mimic sleeping for the suggested sleep time. + clock.advance(2) + # Now 4 seconds have passed, 1 second remains, and it suggests + # sleeping for just 1 more second. 
+ self.assertRetry(clock, next(gen_retries), 4, 1, 1) + # Mimic sleeping for the suggested sleep time. + clock.advance(1) + # There's always a final chance to try something. + self.assertRetry(clock, next(gen_retries), 5, 0, 0) + # All done. + self.assertRaises(StopIteration, next, gen_retries) + + def test_calculates_times_with_reference_to_current_time(self): + # Take control of time. + clock = Clock() + + gen_retries = retries(5, 2, clock=clock) + # No time has passed, 5 seconds remain, and it suggests sleeping + # for 2 seconds. + self.assertRetry(clock, next(gen_retries), 0, 5, 2) + # Mimic sleeping for 4 seconds, more than the suggested. + clock.advance(4) + # Now 4 seconds have passed, 1 second remains, and it suggests + # sleeping for just 1 more second. + self.assertRetry(clock, next(gen_retries), 4, 1, 1) + # Don't sleep, ask again immediately, and the same answer is given. + self.assertRetry(clock, next(gen_retries), 4, 1, 1) + # Mimic sleeping for 100 seconds, much more than the suggested. + clock.advance(100) + # There's always a final chance to try something, but the elapsed and + # remaining figures are still calculated with reference to the current + # time. The wait time never goes below zero. + self.assertRetry(clock, next(gen_retries), 104, -99, 0) + # All done. + self.assertRaises(StopIteration, next, gen_retries) + + +class TestPause(MAASTestCase): + + p_deferred_called = AfterPreprocessing( + lambda d: bool(d.called), Is(True)) + p_deferred_cancelled = AfterPreprocessing( + lambda d: d.result, MatchesAll( + IsInstance(Failure), AfterPreprocessing( + lambda failure: failure.value, + IsInstance(CancelledError)))) + p_call_cancelled = AfterPreprocessing( + lambda call: bool(call.cancelled), Is(True)) + p_call_called = AfterPreprocessing( + lambda call: bool(call.called), Is(True)) + + def test_pause_returns_a_deferred_that_fires_after_a_delay(self): + # Take control of time. 
+ clock = Clock() + wait = randint(4, 4000) + + p_call_scheduled_in_wait_seconds = AfterPreprocessing( + lambda call: call.getTime(), Equals(wait)) + + d = pause(wait, clock=clock) + + # pause() returns an uncalled deferred. + self.assertIsInstance(d, Deferred) + self.assertThat(d, Not(self.p_deferred_called)) + # pause() has scheduled a call to happen in `wait` seconds. + self.assertThat(clock.getDelayedCalls(), HasLength(1)) + [delayed_call] = clock.getDelayedCalls() + self.assertThat(delayed_call, MatchesAll( + p_call_scheduled_in_wait_seconds, + Not(self.p_call_cancelled), + Not(self.p_call_called), + )) + # Nothing has changed right before the deadline. + clock.advance(wait - 1) + self.assertThat(d, Not(self.p_deferred_called)) + self.assertThat(delayed_call, MatchesAll( + Not(self.p_call_cancelled), Not(self.p_call_called))) + # After `wait` seconds the deferred is called. + clock.advance(1) + self.assertThat(d, self.p_deferred_called) + self.assertThat(delayed_call, MatchesAll( + Not(self.p_call_cancelled), self.p_call_called)) + # The result is unexciting. + self.assertIsNone(d.result) + + def test_pause_can_be_cancelled(self): + # Take control of time. + clock = Clock() + wait = randint(4, 4000) + + d = pause(wait, clock=clock) + [delayed_call] = clock.getDelayedCalls() + + d.cancel() + + # The deferred has been cancelled. + self.assertThat(d, MatchesAll( + self.p_deferred_called, self.p_deferred_cancelled, + first_only=True)) + + # We must suppress the cancellation error here or the test suite + # will get huffy about it. + d.addErrback(lambda failure: None) + + # The delayed call was cancelled too. 
+ self.assertThat(delayed_call, MatchesAll( + self.p_call_cancelled, Not(self.p_call_called))) + + +DelayedCallActive = MatchesStructure( + cancelled=AfterPreprocessing(bool, Is(False)), + called=AfterPreprocessing(bool, Is(False)), +) + +DelayedCallCancelled = MatchesStructure( + cancelled=AfterPreprocessing(bool, Is(True)), + called=AfterPreprocessing(bool, Is(False)), +) + +DelayedCallCalled = MatchesStructure( + cancelled=AfterPreprocessing(bool, Is(False)), + called=AfterPreprocessing(bool, Is(True)), +) + + +class TestDeferWithTimeout(MAASTestCase): + + def test__returns_Deferred_that_will_be_cancelled_after_timeout(self): + clock = self.patch(twisted_module, "reactor", Clock()) + + # Called with only a timeout, `deferWithTimeout` returns a Deferred. + timeout = randint(10, 100) + d = deferWithTimeout(timeout) + self.assertThat(d, IsInstance(Deferred)) + self.assertFalse(d.called) + + # It's been scheduled for cancellation in `timeout` seconds. + self.assertThat(clock.getDelayedCalls(), HasLength(1)) + [delayed_call] = clock.getDelayedCalls() + self.assertThat(delayed_call, DelayedCallActive) + self.assertThat(delayed_call, MatchesStructure.byEquality( + time=timeout, func=d.cancel, args=(), kw={})) + + # Once the timeout is reached, the delayed call is called, and this + # cancels `d`. The default canceller for Deferred errbacks with + # CancelledError. + clock.advance(timeout) + self.assertThat(delayed_call, DelayedCallCalled) + self.assertRaises(CancelledError, extract_result, d) + + def test__returns_Deferred_that_wont_be_cancelled_if_called(self): + clock = self.patch(twisted_module, "reactor", Clock()) + + # Called without a function argument, `deferWithTimeout` returns a new + # Deferred, and schedules it to be cancelled in `timeout` seconds. + timeout = randint(10, 100) + d = deferWithTimeout(timeout) + [delayed_call] = clock.getDelayedCalls() + + # Advance some amount of time to simulate something happening. 
+ clock.advance(5) + # The timeout call is still in place. + self.assertThat(delayed_call, DelayedCallActive) + + d.callback(sentinel.result) + # After calling d the timeout call has been cancelled. + self.assertThat(delayed_call, DelayedCallCancelled) + # The result has been safely passed on. + self.assertThat(extract_result(d), Is(sentinel.result)) + + def test__returns_Deferred_that_wont_be_cancelled_if_errored(self): + clock = self.patch(twisted_module, "reactor", Clock()) + + # Called without a function argument, `deferWithTimeout` returns a new + # Deferred, and schedules it to be cancelled in `timeout` seconds. + timeout = randint(10, 100) + d = deferWithTimeout(timeout) + [delayed_call] = clock.getDelayedCalls() + + # Advance some amount of time to simulate something happening, but + # less than the timeout. + clock.advance(timeout - 1) + # The timeout call is still in place. + self.assertThat(delayed_call, DelayedCallActive) + + error = RuntimeError() + d.errback(error) + # After calling d the timeout call has been cancelled. + self.assertThat(delayed_call, DelayedCallCancelled) + # The error has been passed safely on. + self.assertRaises(RuntimeError, extract_result, d) + + def test__calls_given_function(self): + clock = self.patch(twisted_module, "reactor", Clock()) + + class OurDeferred(Deferred): + """A Deferred subclass that we use as a marker.""" + + # Any given function is called via `maybeDeferred`. In this case, we + # get an instance of our marker class back because it is a Deferred. + timeout = randint(10, 100) + d = deferWithTimeout(timeout, OurDeferred) + self.assertThat(d, IsInstance(OurDeferred)) + self.assertFalse(d.called) + + # Just as with the non-function form, it's been scheduled for + # cancellation in `timeout` seconds. 
+ self.assertThat(clock.getDelayedCalls(), HasLength(1)) + [delayed_call] = clock.getDelayedCalls() + self.assertThat(delayed_call, DelayedCallActive) + self.assertThat(delayed_call, MatchesStructure.byEquality( + time=timeout, func=d.cancel, args=(), kw={})) + + # Once the timeout is reached, the delayed call is called, and this + # cancels `d`. The default canceller for Deferred errbacks with + # CancelledError. + clock.advance(timeout) + self.assertThat(delayed_call, DelayedCallCalled) + self.assertRaises(CancelledError, extract_result, d) + + def test__calls_given_function_and_always_returns_Deferred(self): + clock = self.patch(twisted_module, "reactor", Clock()) + + def do_something(a, *b, **c): + return do_something, a, b, c + + # Any given function is called via `maybeDeferred`. In this case, we + # get an already-called Deferred, because `do_something` is + # synchronous. + timeout = randint(10, 100) + d = deferWithTimeout( + timeout, do_something, sentinel.a, sentinel.b, c=sentinel.c) + self.assertThat(d, IsInstance(Deferred)) + self.assertEqual( + (do_something, sentinel.a, (sentinel.b,), {"c": sentinel.c}), + extract_result(d)) + + # The timeout has already been cancelled. + self.assertThat(clock.getDelayedCalls(), Equals([])) + + +class TestCallOut(MAASTestCase): + """Tests for `callOut`.""" + + def test__without_arguments(self): + func = Mock() + func_callout = callOut(func) + # The result is passed through untouched. + self.assertThat(func_callout(sentinel.result), Is(sentinel.result)) + self.assertThat(func, MockCalledOnceWith()) + + def test__with_arguments(self): + func = Mock() + func_callout = callOut(func, sentinel.a, sentinel.b, c=sentinel.c) + # The result is passed through untouched. 
+ self.assertThat(func_callout(sentinel.result), Is(sentinel.result)) + self.assertThat(func, MockCalledOnceWith( + sentinel.a, sentinel.b, c=sentinel.c)) + + def test__does_not_suppress_errors(self): + func_callout = callOut(operator.div, 0, 0) + self.assertRaises(ZeroDivisionError, func_callout, sentinel.result) + + +class TestDeferredValue(MAASTestCase): + """Tests for `DeferredValue`.""" + + def test__create(self): + dvalue = DeferredValue() + self.assertEqual({"waiters": set()}, vars(dvalue)) + + def test__get_returns_a_Deferred(self): + dvalue = DeferredValue() + self.assertThat(dvalue.get(), IsInstance(Deferred)) + + def test__get_returns_a_Deferred_with_a_timeout(self): + clock = self.patch(twisted_module, "reactor", Clock()) + dvalue = DeferredValue() + waiter = dvalue.get(10) + self.assertThat(waiter, IsUnfiredDeferred()) + clock.advance(9) + self.assertThat(waiter, IsUnfiredDeferred()) + clock.advance(1) + self.assertRaises(CancelledError, extract_result, waiter) + + def test__set_notifies_all_waiters(self): + dvalue = DeferredValue() + waiter1 = dvalue.get() + waiter2 = dvalue.get() + dvalue.set(sentinel.value) + self.expectThat(extract_result(waiter1), Is(sentinel.value)) + self.expectThat(extract_result(waiter2), Is(sentinel.value)) + + def test__set_notifies_all_waiters_that_have_not_timed_out(self): + clock = self.patch(twisted_module, "reactor", Clock()) + dvalue = DeferredValue() + waiter0 = dvalue.get() + waiter1 = dvalue.get(1) + waiter2 = dvalue.get(3) + clock.advance(2) + dvalue.set(sentinel.value) + self.expectThat(extract_result(waiter0), Is(sentinel.value)) + self.expectThat(extract_result(waiter2), Is(sentinel.value)) + self.assertRaises(CancelledError, extract_result, waiter1) + + def test__get_after_set_returns_the_value(self): + dvalue = DeferredValue() + dvalue.set(sentinel.value) + waiter = dvalue.get() + self.expectThat(extract_result(waiter), Is(sentinel.value)) + + def test__get_can_be_cancelled(self): + dvalue = DeferredValue() + 
waiter = dvalue.get() + waiter.cancel() + self.assertRaises(CancelledError, extract_result, waiter) + self.assertEqual(set(), dvalue.waiters) + + def test__set_can_only_be_called_once(self): + dvalue = DeferredValue() + dvalue.set(sentinel.value) + self.assertRaises(AlreadyCalledError, dvalue.set, sentinel.foobar) + + def test__cancel_stops_everything(self): + dvalue = DeferredValue() + waiter = dvalue.get() + dvalue.cancel() + self.assertRaises(CancelledError, extract_result, waiter) + self.assertRaises(CancelledError, extract_result, dvalue.get()) + self.assertRaises(AlreadyCalledError, dvalue.set, sentinel.value) + + def test__cancel_can_be_called_multiple_times(self): + dvalue = DeferredValue() + dvalue.cancel() + self.assertRaises(AlreadyCalledError, dvalue.set, sentinel.value) + dvalue.cancel() + self.assertRaises(AlreadyCalledError, dvalue.set, sentinel.value) + + def test__cancel_does_nothing_if_value_already_set(self): + dvalue = DeferredValue() + dvalue.set(sentinel.value) + dvalue.cancel() + self.assertEqual(sentinel.value, extract_result(dvalue.get())) + + def test__set_exception_results_in_a_callback(self): + exception = factory.make_exception() + dvalue = DeferredValue() + dvalue.set(exception) + self.assertIs(exception, dvalue.value) + + def test__set_failure_results_in_an_errback(self): + exception_type = factory.make_exception_type() + dvalue = DeferredValue() + dvalue.set(Failure(exception_type())) + self.assertRaises(exception_type, extract_result, dvalue.get()) + + def test__fail_results_in_an_errback(self): + exception_type = factory.make_exception_type() + dvalue = DeferredValue() + dvalue.fail(exception_type()) + self.assertRaises(exception_type, extract_result, dvalue.get()) + + def test__fail_None_results_in_an_errback_with_current_exception(self): + exception_type = factory.make_exception_type() + dvalue = DeferredValue() + try: + raise exception_type() + except exception_type: + dvalue.fail() + self.assertRaises(exception_type, 
extract_result, dvalue.get()) + + def test__fail_can_only_be_called_once(self): + exception = factory.make_exception() + dvalue = DeferredValue() + dvalue.fail(exception) + self.assertRaises(AlreadyCalledError, dvalue.fail, exception) + + def test__value_is_not_available_until_set(self): + dvalue = DeferredValue() + self.assertRaises(AttributeError, lambda: dvalue.value) + + def test__capture_captures_callback(self): + dvalue = DeferredValue() + d = Deferred() + dvalue.capture(d) + waiter = dvalue.get() + self.assertThat(waiter, IsUnfiredDeferred()) + d.callback(sentinel.result) + self.assertEqual(sentinel.result, extract_result(waiter)) + self.assertIsNone(extract_result(d)) + + def test__capture_captures_errback(self): + dvalue = DeferredValue() + d = Deferred() + dvalue.capture(d) + waiter = dvalue.get() + self.assertThat(waiter, IsUnfiredDeferred()) + exception = factory.make_exception() + d.errback(exception) + self.assertRaises(type(exception), extract_result, waiter) + self.assertIsNone(extract_result(d)) + + def test__observe_observes_callback(self): + dvalue = DeferredValue() + d = Deferred() + dvalue.observe(d) + waiter = dvalue.get() + self.assertThat(waiter, IsUnfiredDeferred()) + d.callback(sentinel.result) + self.assertEqual(sentinel.result, extract_result(waiter)) + self.assertEqual(sentinel.result, extract_result(d)) + + def test__observe_observes_errback(self): + dvalue = DeferredValue() + d = Deferred() + dvalue.observe(d) + waiter = dvalue.get() + self.assertThat(waiter, IsUnfiredDeferred()) + exception = factory.make_exception() + d.errback(exception) + self.assertRaises(type(exception), extract_result, waiter) + self.assertRaises(type(exception), extract_result, d) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_url.py maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_url.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_url.py 1970-01-01 00:00:00.000000000 +0000 +++ 
maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_url.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,107 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Test utilities for URL handling.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +from random import randint + +from maastesting.factory import factory +from maastesting.testcase import MAASTestCase +from provisioningserver.utils.url import compose_URL + + +class TestComposeURL(MAASTestCase): + + def make_path(self): + """Return an arbitrary URL path part.""" + return '%s/%s' % (factory.make_name('root'), factory.make_name('sub')) + + def make_network_interface(self): + return 'eth%d' % randint(0, 100) + + def test__inserts_IPv4(self): + ip = factory.make_ipv4_address() + path = self.make_path() + self.assertEqual( + 'http://%s/%s' % (ip, path), + compose_URL('http:///%s' % path, ip)) + + def test__inserts_IPv6_with_brackets(self): + ip = factory.make_ipv6_address() + path = self.make_path() + self.assertEqual( + 'http://[%s]/%s' % (ip, path), + compose_URL('http:///%s' % path, ip)) + + def test__escapes_IPv6_zone_index(self): + ip = factory.make_ipv6_address() + zone = self.make_network_interface() + hostname = '%s%%%s' % (ip, zone) + path = self.make_path() + self.assertEqual( + 'http://[%s%%25%s]/%s' % (ip, zone, path), + compose_URL('http:///%s' % path, hostname)) + + def test__inserts_bracketed_IPv6_unchanged(self): + ip = factory.make_ipv6_address() + hostname = '[%s]' % ip + path = self.make_path() + self.assertEqual( + 'http://%s/%s' % (hostname, path), + compose_URL('http:///%s' % path, hostname)) + + def test__does_not_escape_bracketed_IPv6_zone_index(self): + ip = factory.make_ipv6_address() + zone = self.make_network_interface() + path = self.make_path() + hostname = '[%s%%25%s]' % (ip, zone) + 
self.assertEqual( + 'http://%s/%s' % (hostname, path), + compose_URL('http:///%s' % path, hostname)) + + def test__inserts_hostname(self): + hostname = factory.make_name('host') + path = self.make_path() + self.assertEqual( + 'http://%s/%s' % (hostname, path), + compose_URL('http:///%s' % path, hostname)) + + def test__preserves_query(self): + ip = factory.make_ipv4_address() + key = factory.make_name('key') + value = factory.make_name('value') + self.assertEqual( + 'https://%s?%s=%s' % (ip, key, value), + compose_URL('https://?%s=%s' % (key, value), ip)) + + def test__preserves_port_with_IPv4(self): + ip = factory.make_ipv4_address() + port = factory.pick_port() + self.assertEqual( + 'https://%s:%s/' % (ip, port), + compose_URL('https://:%s/' % port, ip)) + + def test__preserves_port_with_IPv6(self): + ip = factory.make_ipv6_address() + port = factory.pick_port() + self.assertEqual( + 'https://[%s]:%s/' % (ip, port), + compose_URL('https://:%s/' % port, ip)) + + def test__preserves_port_with_hostname(self): + hostname = factory.make_name('host') + port = factory.pick_port() + self.assertEqual( + 'https://%s:%s/' % (hostname, port), + compose_URL('https://:%s/' % port, hostname)) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_utils.py maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_utils.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_utils.py 2014-09-03 14:18:31.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_utils.py 2015-07-10 01:27:14.000000000 +0000 @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
@@ -15,104 +14,51 @@ __metaclass__ = type __all__ = [] -from argparse import ( - ArgumentParser, - Namespace, - ) -import doctest +from collections import Iterator +from cStringIO import StringIO +import json import os -from random import randint -import re -from shutil import rmtree -import stat -import StringIO -import subprocess -from subprocess import ( - CalledProcessError, - PIPE, - Popen, - ) -import sys -import tempfile +from random import choice from textwrap import dedent -import time -import types -from crochet import EventualResult -from fixtures import ( - EnvironmentVariableFixture, - FakeLogger, - ) -from lxml import etree -from maastesting import ( - bindir, - root, - ) +from fixtures import EnvironmentVariableFixture +from maastesting import root from maastesting.factory import factory -from maastesting.fakemethod import FakeMethod from maastesting.matchers import MockCalledOnceWith -from maastesting.testcase import MAASTestCase +from maastesting.testcase import ( + MAASTestCase, + MAASTwistedRunTest, + ) from mock import ( Mock, sentinel, ) -import netifaces -from netifaces import ( - AF_LINK, - AF_INET, - AF_INET6, - ) import provisioningserver +from provisioningserver.rpc import region +from provisioningserver.rpc.exceptions import NodeAlreadyExists +from provisioningserver.rpc.testing import MockLiveClusterToRegionRPCFixture +from provisioningserver.testing.testcase import PservTestCase +import provisioningserver.utils from provisioningserver.utils import ( - ActionScript, - asynchronous, - atomic_write, - AtomicWriteScript, - call_and_check, - call_capture_and_check, classify, - ensure_dir, - ExternalProcessError, + create_node, + escape_py_literal, filter_dict, - find_ip_via_arp, - find_mac_via_arp, - get_all_interface_addresses, - get_mtime, - incremental_write, + flatten, + get_cluster_config, locate_config, maas_custom_config_markers, - MainScript, parse_key_value_file, - pick_new_mtime, - escape_py_literal, - read_text_file, Safe, 
ShellTemplate, - sudo_write_file, - synchronous, - tempdir, - try_match_xpath, write_custom_config_section, - write_text_file, ) -from testscenarios import multiply_scenarios -from testtools.deferredruntest import AsynchronousDeferredRunTest from testtools.matchers import ( DirExists, - DocTestMatches, EndsWith, - FileContains, - FileExists, IsInstance, - MatchesException, - MatchesStructure, - Not, - Raises, - StartsWith, ) -from testtools.testcase import ExpectedException -from twisted.internet.defer import inlineCallbacks -from twisted.internet.threads import deferToThread +from twisted.internet import defer def get_branch_dir(*path): @@ -140,13 +86,13 @@ self.assertTrue(os.path.isabs(locate_config())) def test_locates_config_file(self): - filename = factory.getRandomString() + filename = factory.make_string() self.assertEqual( get_branch_dir('etc/maas/', filename), locate_config(filename)) def test_locates_full_path(self): - path = [factory.getRandomString() for counter in range(3)] + path = [factory.make_string() for counter in range(3)] self.assertEqual( get_branch_dir('etc/maas/', *path), locate_config(*path)) @@ -190,58 +136,6 @@ self.assertEqual(items, filter_dict(items, keys)) -class TestInterfaceFunctions(MAASTestCase): - """Tests for functions relating to network interfaces.""" - - example_interfaces = { - 'eth0': { - AF_LINK: [{'addr': '00:1d:ba:86:aa:fe', - 'broadcast': 'ff:ff:ff:ff:ff:ff'}], - }, - 'lo': { - AF_INET: [{'addr': '127.0.0.1', - 'netmask': '255.0.0.0', - 'peer': '127.0.0.1'}], - AF_INET6: [{'addr': '::1', - 'netmask': 'ff:ff:ff:ff:ff:ff'}], - AF_LINK: [{'addr': '00:00:00:00:00:00', - 'peer': '00:00:00:00:00:00'}], - }, - 'lxcbr0': { - AF_INET: [{'addr': '10.0.3.1', - 'broadcast': '10.0.3.255', - 'netmask': '255.255.255.0'}], - AF_INET6: [{'addr': 'fe80::9894:6fff:fe8b:22%lxcbr0', - 'netmask': 'ffff:ffff:ffff:ffff::'}], - AF_LINK: [{'addr': '9a:94:6f:8b:00:22', - 'broadcast': 'ff:ff:ff:ff:ff:ff'}]}, - 'tun0': { - AF_INET: [{'addr': 
'10.99.244.250', - 'netmask': '255.255.255.255', - 'peer': '10.99.244.249'}], - }, - 'wlan0': { - AF_INET: [{'addr': '10.155.1.159', - 'broadcast': '10.155.31.255', - 'netmask': '255.255.224.0'}], - AF_INET6: [{'addr': 'fe80::221:5dff:fe85:d2e4%wlan0', - 'netmask': 'ffff:ffff:ffff:ffff::'}], - AF_LINK: [{'addr': '00:21:5d:85:dAF_INET:e4', - 'broadcast': 'ff:ff:ff:ff:ff:ff'}], - }, - } - - def test_get_all_interface_addresses(self): - # get_all_interface_addresses() returns the IPv4 addresses associated - # with each of the network devices present on the system, as reported - # by netifaces. IPv6 is ignored. - self.patch(netifaces, "interfaces", self.example_interfaces.keys) - self.patch(netifaces, "ifaddresses", self.example_interfaces.get) - self.assertEqual( - ["127.0.0.1", "10.0.3.1", "10.99.244.250", "10.155.1.159"], - list(get_all_interface_addresses())) - - class TestSafe(MAASTestCase): """Test `Safe`.""" @@ -251,174 +145,11 @@ self.assertIs(something, safe.value) def test_repr(self): - string = factory.getRandomString() + string = factory.make_string() safe = Safe(string) self.assertEqual("" % string, repr(safe)) -class TestWriteAtomic(MAASTestCase): - """Test `atomic_write`.""" - - def test_atomic_write_overwrites_dest_file(self): - content = factory.getRandomString() - filename = self.make_file(contents=factory.getRandomString()) - atomic_write(content, filename) - self.assertThat(filename, FileContains(content)) - - def test_atomic_write_does_not_overwrite_file_if_overwrite_false(self): - content = factory.getRandomString() - random_content = factory.getRandomString() - filename = self.make_file(contents=random_content) - atomic_write(content, filename, overwrite=False) - self.assertThat(filename, FileContains(random_content)) - - def test_atomic_write_writes_file_if_no_file_present(self): - filename = os.path.join(self.make_dir(), factory.getRandomString()) - content = factory.getRandomString() - atomic_write(content, filename, overwrite=False) - 
self.assertThat(filename, FileContains(content)) - - def test_atomic_write_does_not_leak_temp_file_when_not_overwriting(self): - # If the file is not written because it already exists and - # overwriting was disabled, atomic_write does not leak its - # temporary file. - filename = self.make_file() - atomic_write(factory.getRandomString(), filename, overwrite=False) - self.assertEqual( - [os.path.basename(filename)], - os.listdir(os.path.dirname(filename))) - - def test_atomic_write_does_not_leak_temp_file_on_failure(self): - # If the overwrite fails, atomic_write does not leak its - # temporary file. - self.patch(os, 'rename', Mock(side_effect=OSError())) - filename = self.make_file() - with ExpectedException(OSError): - atomic_write(factory.getRandomString(), filename) - self.assertEqual( - [os.path.basename(filename)], - os.listdir(os.path.dirname(filename))) - - def test_atomic_write_sets_permissions(self): - atomic_file = self.make_file() - # Pick an unusual mode that is also likely to fall outside our - # umask. We want this mode set, not treated as advice that may - # be tightened up by umask later. 
- mode = 0323 - atomic_write(factory.getRandomString(), atomic_file, mode=mode) - self.assertEqual(mode, stat.S_IMODE(os.stat(atomic_file).st_mode)) - - def test_atomic_write_sets_permissions_before_moving_into_place(self): - - recorded_modes = [] - - def record_mode(source, dest): - """Stub for os.rename: get source file's access mode.""" - recorded_modes.append(os.stat(source).st_mode) - - self.patch(os, 'rename', Mock(side_effect=record_mode)) - playground = self.make_dir() - atomic_file = os.path.join(playground, factory.make_name('atomic')) - mode = 0323 - atomic_write(factory.getRandomString(), atomic_file, mode=mode) - [recorded_mode] = recorded_modes - self.assertEqual(mode, stat.S_IMODE(recorded_mode)) - - def test_atomic_write_sets_OSError_filename_if_undefined(self): - # When the filename attribute of an OSError is undefined when - # attempting to create a temporary file, atomic_write fills it in with - # a representative filename, similar to the specification required by - # mktemp(1). - mock_mkstemp = self.patch(tempfile, "mkstemp") - mock_mkstemp.side_effect = OSError() - filename = os.path.join("directory", "basename") - error = self.assertRaises(OSError, atomic_write, "content", filename) - self.assertEqual( - os.path.join("directory", ".basename.XXXXXX.tmp"), - error.filename) - - def test_atomic_write_does_not_set_OSError_filename_if_defined(self): - # When the filename attribute of an OSError is defined when attempting - # to create a temporary file, atomic_write leaves it alone. 
- mock_mkstemp = self.patch(tempfile, "mkstemp") - mock_mkstemp.side_effect = OSError() - mock_mkstemp.side_effect.filename = factory.make_name("filename") - filename = os.path.join("directory", "basename") - error = self.assertRaises(OSError, atomic_write, "content", filename) - self.assertEqual( - mock_mkstemp.side_effect.filename, - error.filename) - - -class TestIncrementalWrite(MAASTestCase): - """Test `incremental_write`.""" - - def test_incremental_write_increments_modification_time(self): - content = factory.getRandomString() - filename = self.make_file(contents=factory.getRandomString()) - # Pretend that this file is older than it is. So that - # incrementing its mtime won't put it in the future. - old_mtime = os.stat(filename).st_mtime - 10 - os.utime(filename, (old_mtime, old_mtime)) - incremental_write(content, filename) - self.assertAlmostEqual( - os.stat(filename).st_mtime, old_mtime + 1, delta=0.01) - - def test_incremental_write_sets_permissions(self): - atomic_file = self.make_file() - mode = 0323 - incremental_write(factory.getRandomString(), atomic_file, mode=mode) - self.assertEqual(mode, stat.S_IMODE(os.stat(atomic_file).st_mode)) - - -class TestGetMTime(MAASTestCase): - """Test `get_mtime`.""" - - def test_get_mtime_returns_None_for_nonexistent_file(self): - nonexistent_file = os.path.join( - self.make_dir(), factory.make_name('nonexistent-file')) - self.assertIsNone(get_mtime(nonexistent_file)) - - def test_get_mtime_returns_mtime(self): - existing_file = self.make_file() - mtime = os.stat(existing_file).st_mtime - randint(0, 100) - os.utime(existing_file, (mtime, mtime)) - # Some small rounding/representation errors can happen here. - # That's just the way of floating-point numbers. According to - # Gavin there's a conversion to fixed-point along the way, which - # would raise representability issues. 
- self.assertAlmostEqual(mtime, get_mtime(existing_file), delta=0.00001) - - def test_get_mtime_passes_on_other_error(self): - forbidden_file = self.make_file() - self.patch(os, 'stat', FakeMethod(failure=OSError("Forbidden file"))) - self.assertRaises(OSError, get_mtime, forbidden_file) - - -class TestPickNewMTime(MAASTestCase): - """Test `pick_new_mtime`.""" - - def test_pick_new_mtime_applies_starting_age_to_new_file(self): - before = time.time() - starting_age = randint(0, 5) - recommended_age = pick_new_mtime(None, starting_age=starting_age) - now = time.time() - self.assertAlmostEqual( - now - starting_age, - recommended_age, - delta=(now - before)) - - def test_pick_new_mtime_increments_mtime_if_possible(self): - past = time.time() - 2 - self.assertEqual(past + 1, pick_new_mtime(past)) - - def test_pick_new_mtime_refuses_to_move_mtime_into_the_future(self): - # Race condition: this will fail if the test gets held up for - # a second between readings of the clock. - now = time.time() - self.assertEqual(now, pick_new_mtime(now)) - - class WriteCustomConfigSectionTest(MAASTestCase): """Test `write_custom_config_section`.""" @@ -572,46 +303,6 @@ write_custom_config_section(original, new_custom_section)) -class SudoWriteFileTest(MAASTestCase): - """Testing for `sudo_write_file`.""" - - def patch_popen(self, return_value=0): - process = Mock() - process.returncode = return_value - process.communicate = Mock(return_value=('output', 'error output')) - self.patch( - provisioningserver.utils, 'Popen', Mock(return_value=process)) - return process - - def test_calls_atomic_write(self): - self.patch_popen() - path = os.path.join(self.make_dir(), factory.make_name('file')) - contents = factory.getRandomString() - - sudo_write_file(path, contents) - - self.assertThat(provisioningserver.utils.Popen, MockCalledOnceWith([ - 'sudo', '-n', 'maas-provision', 'atomic-write', - '--filename', path, '--mode', '0644', - ], - stdin=PIPE)) - - def test_encodes_contents(self): - process 
= self.patch_popen() - contents = factory.getRandomString() - encoding = 'utf-16' - sudo_write_file(self.make_file(), contents, encoding=encoding) - self.assertThat( - process.communicate, - MockCalledOnceWith(contents.encode(encoding))) - - def test_catches_failures(self): - self.patch_popen(1) - self.assertRaises( - CalledProcessError, - sudo_write_file, self.make_file(), factory.getRandomString()) - - class ParseConfigTest(MAASTestCase): """Testing for `parse_key_value_file`.""" @@ -678,519 +369,6 @@ self.assertEqual(expected, observed) -class TestActionScript(MAASTestCase): - """Test `ActionScript`.""" - - factory = ActionScript - - def setUp(self): - super(TestActionScript, self).setUp() - # ActionScript.setup() is not safe to run in the test suite. - self.patch(ActionScript, "setup", lambda self: None) - # ArgumentParser sometimes likes to print to stdout/err. Use - # StringIO.StringIO to be relaxed about bytes/unicode (argparse uses - # bytes). When moving to Python 3 this will need to be tightened up. - self.patch(sys, "stdout", StringIO.StringIO()) - self.patch(sys, "stderr", StringIO.StringIO()) - - def test_init(self): - description = factory.getRandomString() - script = self.factory(description) - self.assertIsInstance(script.parser, ArgumentParser) - self.assertEqual(description, script.parser.description) - - def test_register(self): - handler = types.ModuleType(b"handler") - handler.add_arguments = lambda parser: ( - self.assertIsInstance(parser, ArgumentParser)) - handler.run = lambda args: ( - self.assertIsInstance(args, int)) - script = self.factory("Description") - script.register("slay", handler) - self.assertIn("slay", script.subparsers.choices) - action_parser = script.subparsers.choices["slay"] - self.assertIsInstance(action_parser, ArgumentParser) - - def test_register_without_add_arguments(self): - # ActionScript.register will crash if the handler has no - # add_arguments() callable. 
- handler = types.ModuleType(b"handler") - handler.run = lambda args: None - script = self.factory("Description") - error = self.assertRaises( - AttributeError, script.register, "decapitate", handler) - self.assertIn("'add_arguments'", "%s" % error) - - def test_register_without_run(self): - # ActionScript.register will crash if the handler has no run() - # callable. - handler = types.ModuleType(b"handler") - handler.add_arguments = lambda parser: None - script = self.factory("Description") - error = self.assertRaises( - AttributeError, script.register, "decapitate", handler) - self.assertIn("'run'", "%s" % error) - - def test_call(self): - handler_calls = [] - handler = types.ModuleType(b"handler") - handler.add_arguments = lambda parser: None - handler.run = handler_calls.append - script = self.factory("Description") - script.register("amputate", handler) - error = self.assertRaises(SystemExit, script, ["amputate"]) - self.assertEqual(0, error.code) - self.assertEqual(1, len(handler_calls)) - self.assertIsInstance(handler_calls[0], Namespace) - - def test_call_invalid_choice(self): - script = self.factory("Description") - self.assertRaises(SystemExit, script, ["disembowel"]) - self.assertIn(b"invalid choice", sys.stderr.getvalue()) - - def test_call_with_exception(self): - # Most exceptions from run() are propagated. - handler = types.ModuleType(b"handler") - handler.add_arguments = lambda parser: None - handler.run = lambda args: 0 / 0 - script = self.factory("Description") - script.register("eviscerate", handler) - self.assertRaises(ZeroDivisionError, script, ["eviscerate"]) - - def test_call_with_process_exception(self): - # CalledProcessError is converted into SystemExit. 
- exception = CalledProcessError( - randint(0, 256), [factory.getRandomString()], - factory.getRandomString().encode("ascii")) - - def raise_exception(): - raise exception - - handler = types.ModuleType(b"handler") - handler.add_arguments = lambda parser: None - handler.run = lambda args: raise_exception() - script = self.factory("Description") - script.register("sever", handler) - error = self.assertRaises(SystemExit, script, ["sever"]) - self.assertEqual(exception.returncode, error.code) - - def test_call_with_keyboard_interrupt(self): - # KeyboardInterrupt is silently converted into SystemExit, with an - # exit code of 1. - - def raise_exception(): - raise KeyboardInterrupt() - - handler = types.ModuleType(b"handler") - handler.add_arguments = lambda parser: None - handler.run = lambda args: raise_exception() - script = self.factory("Description") - script.register("smash", handler) - error = self.assertRaises(SystemExit, script, ["smash"]) - self.assertEqual(1, error.code) - - -class TestMainScript(TestActionScript): - - factory = MainScript - - def test_default_arguments(self): - # MainScript accepts a --config-file parameter. The value of this is - # passed through into the args namespace object as config_file. - handler_calls = [] - handler = types.ModuleType(b"handler") - handler.add_arguments = lambda parser: None - handler.run = handler_calls.append - script = self.factory("Description") - script.register("dislocate", handler) - dummy_config_file = factory.make_name("config-file") - # --config-file is specified before the action. - args = ["--config-file", dummy_config_file, "dislocate"] - error = self.assertRaises(SystemExit, script, args) - self.assertEqual(0, error.code) - namespace = handler_calls[0] - self.assertEqual( - {"config_file": dummy_config_file, "handler": handler}, - vars(namespace)) - - -class TestAtomicWriteScript(MAASTestCase): - - def setUp(self): - super(TestAtomicWriteScript, self).setUp() - # Silence ArgumentParser. 
- self.patch(sys, "stdout", StringIO.StringIO()) - self.patch(sys, "stderr", StringIO.StringIO()) - - def get_parser(self): - parser = ArgumentParser() - AtomicWriteScript.add_arguments(parser) - return parser - - def get_and_run_mocked_script(self, content, filename, *args): - self.patch(sys, "stdin", StringIO.StringIO(content)) - parser = self.get_parser() - parsed_args = parser.parse_args(*args) - mocked_atomic_write = self.patch( - provisioningserver.utils, 'atomic_write') - AtomicWriteScript.run(parsed_args) - return mocked_atomic_write - - def test_arg_setup(self): - parser = self.get_parser() - filename = factory.getRandomString() - args = parser.parse_args(( - '--no-overwrite', - '--filename', filename, - '--mode', "111")) - self.assertThat( - args, MatchesStructure.byEquality( - no_overwrite=True, - filename=filename, - mode="111")) - - def test_filename_arg_required(self): - parser = self.get_parser() - self.assertRaises(SystemExit, parser.parse_args, ('--no-overwrite',)) - - def test_no_overwrite_defaults_to_false(self): - parser = self.get_parser() - filename = factory.getRandomString() - args = parser.parse_args(('--filename', filename)) - self.assertFalse(args.no_overwrite) - - def test_script_executable(self): - content = factory.getRandomString() - script = [os.path.join(bindir, "maas-provision"), 'atomic-write'] - target_file = self.make_file() - script.extend(('--filename', target_file, '--mode', '615')) - cmd = Popen( - script, stdin=PIPE, - env=dict(PYTHONPATH=":".join(sys.path))) - cmd.communicate(content) - self.assertThat(target_file, FileContains(content)) - self.assertEqual(0615, stat.S_IMODE(os.stat(target_file).st_mode)) - - def test_passes_overwrite_flag(self): - content = factory.getRandomString() - filename = factory.getRandomString() - mocked_atomic_write = self.get_and_run_mocked_script( - content, filename, - ('--filename', filename, '--no-overwrite')) - - self.assertThat( - mocked_atomic_write, - MockCalledOnceWith(content, 
filename, mode=0600, overwrite=False)) - - def test_passes_mode_flag(self): - content = factory.getRandomString() - filename = factory.getRandomString() - # Mode that's unlikely to occur in the wild. - mode = 0377 - mocked_atomic_write = self.get_and_run_mocked_script( - content, filename, - ('--filename', filename, '--mode', oct(mode))) - - self.assertThat( - mocked_atomic_write, - MockCalledOnceWith(content, filename, mode=mode, overwrite=True)) - - def test_default_mode(self): - content = factory.getRandomString() - filename = factory.getRandomString() - mocked_atomic_write = self.get_and_run_mocked_script( - content, filename, - ('--filename', filename)) - - self.assertThat( - mocked_atomic_write, - MockCalledOnceWith(content, filename, mode=0600, overwrite=True)) - - -class TestEnsureDir(MAASTestCase): - def test_succeeds_if_directory_already_existed(self): - path = self.make_dir() - ensure_dir(path) - self.assertThat(path, DirExists()) - - def test_fails_if_path_is_already_a_file(self): - path = self.make_file() - self.assertRaises(OSError, ensure_dir, path) - self.assertThat(path, FileExists()) - - def test_creates_dir_if_not_present(self): - path = os.path.join(self.make_dir(), factory.make_name()) - ensure_dir(path) - self.assertThat(path, DirExists()) - - def test_passes_on_other_errors(self): - not_a_dir = self.make_file() - self.assertRaises( - OSError, - ensure_dir, - os.path.join(not_a_dir, factory.make_name('impossible'))) - - def test_creates_multiple_layers_of_directories_if_needed(self): - path = os.path.join( - self.make_dir(), factory.make_name('subdir'), - factory.make_name('sbusubdir')) - ensure_dir(path) - self.assertThat(path, DirExists()) - - -class TestTempDir(MAASTestCase): - def test_creates_real_fresh_directory(self): - stored_text = factory.getRandomString() - filename = factory.make_name('test-file') - with tempdir() as directory: - self.assertThat(directory, DirExists()) - write_text_file(os.path.join(directory, filename), 
stored_text) - retrieved_text = read_text_file(os.path.join(directory, filename)) - files = os.listdir(directory) - - self.assertEqual(stored_text, retrieved_text) - self.assertEqual([filename], files) - - def test_creates_unique_directory(self): - with tempdir() as dir1, tempdir() as dir2: - pass - self.assertNotEqual(dir1, dir2) - - def test_cleans_up_on_successful_exit(self): - with tempdir() as directory: - file_path = factory.make_file(directory) - - self.assertThat(directory, Not(DirExists())) - self.assertThat(file_path, Not(FileExists())) - - def test_cleans_up_on_exception_exit(self): - class DeliberateFailure(Exception): - pass - - with ExpectedException(DeliberateFailure): - with tempdir() as directory: - file_path = factory.make_file(directory) - raise DeliberateFailure("Exiting context by exception") - - self.assertThat(directory, Not(DirExists())) - self.assertThat(file_path, Not(FileExists())) - - def test_tolerates_disappearing_dir(self): - with tempdir() as directory: - rmtree(directory) - - self.assertThat(directory, Not(DirExists())) - - def test_uses_location(self): - temp_location = self.make_dir() - with tempdir(location=temp_location) as directory: - self.assertThat(directory, DirExists()) - location_listing = os.listdir(temp_location) - - self.assertNotEqual(temp_location, directory) - self.assertThat(directory, StartsWith(temp_location + os.path.sep)) - self.assertIn(os.path.basename(directory), location_listing) - self.assertThat(temp_location, DirExists()) - self.assertThat(directory, Not(DirExists())) - - def test_yields_unicode(self): - with tempdir() as directory: - pass - - self.assertIsInstance(directory, unicode) - - def test_accepts_unicode_from_mkdtemp(self): - fake_dir = os.path.join(self.make_dir(), factory.make_name('tempdir')) - self.assertIsInstance(fake_dir, unicode) - self.patch(tempfile, 'mkdtemp').return_value = fake_dir - - with tempdir() as directory: - pass - - self.assertEqual(fake_dir, directory) - 
self.assertIsInstance(directory, unicode) - - def test_decodes_bytes_from_mkdtemp(self): - encoding = 'utf-16' - self.patch(sys, 'getfilesystemencoding').return_value = encoding - fake_dir = os.path.join(self.make_dir(), factory.make_name('tempdir')) - self.patch(tempfile, 'mkdtemp').return_value = fake_dir.encode( - encoding) - - with tempdir() as directory: - pass - - self.assertEqual(fake_dir, directory) - self.assertIsInstance(directory, unicode) - - def test_uses_prefix(self): - prefix = factory.getRandomString(3) - with tempdir(prefix=prefix) as directory: - pass - - self.assertThat(os.path.basename(directory), StartsWith(prefix)) - - def test_uses_suffix(self): - suffix = factory.getRandomString(3) - with tempdir(suffix=suffix) as directory: - pass - - self.assertThat(os.path.basename(directory), EndsWith(suffix)) - - def test_restricts_access(self): - with tempdir() as directory: - mode = os.stat(directory).st_mode - self.assertEqual( - stat.S_IMODE(mode), - stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) - - -class TestReadTextFile(MAASTestCase): - def test_reads_file(self): - text = factory.getRandomString() - self.assertEqual(text, read_text_file(self.make_file(contents=text))) - - def test_defaults_to_utf8(self): - # Test input: "registered trademark" (ringed R) symbol. - text = '\xae' - self.assertEqual( - text, - read_text_file(self.make_file(contents=text.encode('utf-8')))) - - def test_uses_given_encoding(self): - # Test input: "registered trademark" (ringed R) symbol. 
- text = '\xae' - self.assertEqual( - text, - read_text_file( - self.make_file(contents=text.encode('utf-16')), - encoding='utf-16')) - - -class TestWriteTextFile(MAASTestCase): - def test_creates_file(self): - path = os.path.join(self.make_dir(), factory.make_name('text')) - text = factory.getRandomString() - write_text_file(path, text) - self.assertThat(path, FileContains(text)) - - def test_overwrites_file(self): - path = self.make_file(contents="original text") - text = factory.getRandomString() - write_text_file(path, text) - self.assertThat(path, FileContains(text)) - - def test_defaults_to_utf8(self): - path = self.make_file() - # Test input: "registered trademark" (ringed R) symbol. - text = '\xae' - write_text_file(path, text) - self.assertThat(path, FileContains(text.encode('utf-8'))) - - def test_uses_given_encoding(self): - path = self.make_file() - # Test input: "registered trademark" (ringed R) symbol. - text = '\xae' - write_text_file(path, text, encoding='utf-16') - self.assertThat(path, FileContains(text.encode('utf-16'))) - - -class TestTryMatchXPathScenarios(MAASTestCase): - - doctest_flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE - - def scenario(name, xpath, doc, expected_result, expected_log=""): - """Return a scenario (for `testscenarios`) to test `try_match_xpath`. - - This is a convenience function to reduce the amount of - boilerplate when constructing `scenarios_inputs` later on. - - The scenario it constructs defines an XML document, and XPath - expression, the expectation as to whether it will match or - not, and the expected log output. - """ - doc = etree.fromstring(doc).getroottree() - return name, dict( - xpath=xpath, doc=doc, expected_result=expected_result, - expected_log=dedent(expected_log)) - - # Exercise try_match_xpath with a variety of different inputs. 
- scenarios_inputs = ( - scenario( - "expression matches", - "/foo", "", True), - scenario( - "expression does not match", - "/foo", "", False), - scenario( - "text expression matches", - "/foo/text()", 'bar', True), - scenario( - "text expression does not match", - "/foo/text()", '', False), - scenario( - "string expression matches", - "string()", 'bar', True), - scenario( - "string expression does not match", - "string()", '', False), - scenario( - "unrecognised namespace", - "/foo:bar", '', False, - expected_log="""\ - Invalid expression: /foo:bar - Traceback (most recent call last): - ... - XPathEvalError: Undefined namespace prefix - """), - ) - - # Exercise try_match_xpath with and without compiled XPath - # expressions. - scenarios_xpath_compiler = ( - ("xpath-compiler=XPath", dict(xpath_compile=etree.XPath)), - ("xpath-compiler=None", dict(xpath_compile=lambda expr: expr)), - ) - - # Exercise try_match_xpath with and without documents wrapped in - # an XPathDocumentEvaluator. - scenarios_doc_compiler = ( - ("doc-compiler=XPathDocumentEvaluator", dict( - doc_compile=etree.XPathDocumentEvaluator)), - ("doc-compiler=None", dict(doc_compile=lambda doc: doc)), - ) - - scenarios = multiply_scenarios( - scenarios_inputs, scenarios_xpath_compiler, - scenarios_doc_compiler) - - def setUp(self): - super(TestTryMatchXPathScenarios, self).setUp() - self.logger = self.useFixture(FakeLogger()) - - def test(self): - xpath = self.xpath_compile(self.xpath) - doc = self.doc_compile(self.doc) - self.assertIs(self.expected_result, try_match_xpath(xpath, doc)) - self.assertThat( - self.logger.output, DocTestMatches( - self.expected_log, self.doctest_flags)) - - -class TestTryMatchXPath(MAASTestCase): - - def test_logs_to_specified_logger(self): - xpath = etree.XPath("/foo:bar") - doc = etree.XML("") - root_logger = self.useFixture(FakeLogger()) - callers_logger = Mock() - try_match_xpath(xpath, doc, callers_logger) - self.assertEqual("", root_logger.output) - self.assertThat( - 
callers_logger.exception, - MockCalledOnceWith("Invalid expression: %s", xpath.path)) - - class TestClassify(MAASTestCase): def test_no_subjects(self): @@ -1205,222 +383,8 @@ classify(is_even, subjects)) -class TestSubprocessWrappers(MAASTestCase): - """Tests for the subprocess.* wrapper functions.""" - - def test_call_and_check_returns_returncode(self): - self.patch(subprocess, 'check_call', FakeMethod(0)) - self.assertEqual(0, call_and_check('some_command')) - - def test_call_and_check_raises_ExternalProcessError_on_failure(self): - self.patch(subprocess, 'check_call').side_effect = ( - CalledProcessError('-1', 'some_command')) - error = self.assertRaises( - ExternalProcessError, call_and_check, "some command") - self.assertEqual('-1', error.returncode) - self.assertEqual('some_command', error.cmd) - - def test_call_capture_and_check_returns_returncode(self): - self.patch(subprocess, 'check_output', FakeMethod("Some output")) - self.assertEqual("Some output", call_capture_and_check('some_command')) - - def test_call_capture_and_check_raises_ExternalProcessError_on_fail(self): - self.patch(subprocess, 'check_output').side_effect = ( - CalledProcessError('-1', 'some_command', "Some output")) - error = self.assertRaises( - ExternalProcessError, call_capture_and_check, "some command") - self.assertEqual('-1', error.returncode) - self.assertEqual('some_command', error.cmd) - self.assertEqual("Some output", error.output) - - -class TestExternalProcessError(MAASTestCase): - """Tests for the ExternalProcessError class.""" - - def test_to_unicode_decodes_to_unicode(self): - # Byte strings are decoded as ASCII by _to_unicode(), replacing - # all non-ASCII characters with U+FFFD REPLACEMENT CHARACTERs. - byte_string = b"This string will be converted. \xe5\xb2\x81\xe5." - expected_unicode_string = ( - u"This string will be converted. 
\ufffd\ufffd\ufffd\ufffd.") - converted_string = ExternalProcessError._to_unicode(byte_string) - self.assertIsInstance(converted_string, unicode) - self.assertEqual(expected_unicode_string, converted_string) - - def test_to_unicode_defers_to_unicode_constructor(self): - # Unicode strings and non-byte strings are handed to unicode() - # to undergo Python's normal coercion strategy. (For unicode - # strings this is actually a no-op, but it's cheaper to do this - # than special-case unicode strings.) - self.assertEqual( - unicode(self), ExternalProcessError._to_unicode(self)) - - def test_to_ascii_encodes_to_bytes(self): - unicode_string = u"Thîs nøn-åßçií s†ring will be cönvërted" - expected_byte_string = b"Th?s n?n-???i? s?ring will be c?nv?rted" - converted_string = ExternalProcessError._to_ascii(unicode_string) - self.assertIsInstance(converted_string, bytes) - self.assertEqual(expected_byte_string, converted_string) - - def test_to_ascii_defers_to_bytes(self): - # Byte strings and non-unicode strings are handed to bytes() to - # undergo Python's normal coercion strategy. (For byte strings - # this is actually a no-op, but it's cheaper to do this than - # special-case byte strings.) - self.assertEqual(bytes(self), ExternalProcessError._to_ascii(self)) - - def test_to_ascii_removes_non_printable_chars(self): - # After conversion to a byte string, all non-printable and - # non-ASCII characters are replaced with question marks. - byte_string = b"*How* many roads\x01\x02\xb2\xfe" - expected_byte_string = b"*How* many roads????" 
- converted_string = ExternalProcessError._to_ascii(byte_string) - self.assertIsInstance(converted_string, bytes) - self.assertEqual(expected_byte_string, converted_string) - - def test__str__returns_bytes(self): - error = ExternalProcessError(returncode=-1, cmd="foo-bar") - self.assertIsInstance(error.__str__(), bytes) - - def test__unicode__returns_unicode(self): - error = ExternalProcessError(returncode=-1, cmd="foo-bar") - self.assertIsInstance(error.__unicode__(), unicode) - - def test__str__contains_output(self): - output = u"Hëré's søme øu†pût" - ascii_output = "H?r?'s s?me ?u?p?t" - error = ExternalProcessError( - returncode=-1, cmd="foo-bar", output=output) - self.assertIn(ascii_output, error.__str__()) - - def test__unicode__contains_output(self): - output = "Hëré's søme øu†pût" - error = ExternalProcessError( - returncode=-1, cmd="foo-bar", output=output) - self.assertIn(output, error.__unicode__()) - - -class TestFindIPViaARP(MAASTestCase): - def test_find_ip_via_arp(self): - sample = """Address HWtype HWaddress Flags Mask Iface - 192.168.100.20 (incomplete) virbr1 - 192.168.0.104 (incomplete) eth0 - 192.168.0.5 (incomplete) eth0 - 192.168.0.2 (incomplete) eth0 - 192.168.0.100 (incomplete) eth0 - 192.168.122.20 ether 52:54:00:02:86:4b C virbr0 - 192.168.0.4 (incomplete) eth0 - 192.168.0.1 ether 90:f6:52:f6:17:92 C eth0 - """ - - call_capture_and_check = self.patch( - provisioningserver.utils, 'call_capture_and_check') - call_capture_and_check.return_value = sample - ip_address_observed = find_ip_via_arp("90:f6:52:f6:17:92") - self.assertThat( - call_capture_and_check, - MockCalledOnceWith(['arp', '-n'])) - self.assertEqual("192.168.0.1", ip_address_observed) - - -class TestFindMACViaARP(MAASTestCase): - - def patch_call(self, output): - """Replace `call_capture_and_check` with one that returns `output`.""" - fake = self.patch(provisioningserver.utils, 'call_capture_and_check') - fake.return_value = output - return fake - - def 
test__resolves_IP_address_to_MAC(self): - sample = """\ - Address HWtype HWaddress Flags Mask Iface - 192.168.100.20 (incomplete) virbr1 - 192.168.0.104 (incomplete) eth0 - 192.168.0.5 (incomplete) eth0 - 192.168.0.2 (incomplete) eth0 - 192.168.0.100 (incomplete) eth0 - 192.168.122.20 ether 52:54:00:02:86:4b C virbr0 - 192.168.0.4 (incomplete) eth0 - 192.168.0.1 ether 90:f6:52:f6:17:92 C eth0 - """ - - call_capture_and_check = self.patch_call(sample) - mac_address_observed = find_mac_via_arp("192.168.122.20") - self.assertThat( - call_capture_and_check, - MockCalledOnceWith(['arp', '-n'])) - self.assertEqual("52:54:00:02:86:4b", mac_address_observed) - - def test__returns_consistent_output(self): - ip = factory.getRandomIPAddress() - macs = [ - '52:54:00:02:86:4b', - '90:f6:52:f6:17:92', - ] - lines = ['%s ether %s C eth0' % (ip, mac) for mac in macs] - self.patch_call('\n'.join(lines)) - one_result = find_mac_via_arp(ip) - self.patch_call('\n'.join(reversed(lines))) - other_result = find_mac_via_arp(ip) - - self.assertIn(one_result, macs) - self.assertEqual(one_result, other_result) - - -class TestAsynchronousDecorator(MAASTestCase): - - run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=5) - - @asynchronous - def return_args(self, *args, **kwargs): - return args, kwargs - - def test_in_reactor_thread(self): - result = self.return_args(1, 2, three=3) - self.assertEqual(((1, 2), {"three": 3}), result) - - @inlineCallbacks - def test_in_other_thread(self): - def do_stuff_in_thread(): - result = self.return_args(3, 4, five=5) - self.assertThat(result, IsInstance(EventualResult)) - return result.wait() - # Call do_stuff_in_thread() from another thread. - result = yield deferToThread(do_stuff_in_thread) - # do_stuff_in_thread() waited for the result of return_args(). - # The arguments passed back match those passed in from - # do_stuff_in_thread(). 
- self.assertEqual(((3, 4), {"five": 5}), result) - - -class TestSynchronousDecorator(MAASTestCase): - - run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=5) - - @synchronous - def return_args(self, *args, **kwargs): - return args, kwargs - - def test_in_reactor_thread(self): - expected = MatchesException( - AssertionError, re.escape( - "Function return_args(...) must not be called " - "in the reactor thread.")) - self.assertThat(self.return_args, Raises(expected)) - - @inlineCallbacks - def test_in_other_thread(self): - def do_stuff_in_thread(): - return self.return_args(3, 4, five=5) - # Call do_stuff_in_thread() from another thread. - result = yield deferToThread(do_stuff_in_thread) - # do_stuff_in_thread() ran straight through, without - # modification. The arguments passed back match those passed in - # from do_stuff_in_thread(). - self.assertEqual(((3, 4), {"five": 5}), result) - - class TestQuotePyLiteral(MAASTestCase): + def test_uses_repr(self): string = factory.make_name('string') repr_mock = self.patch(provisioningserver.utils, 'repr') @@ -1437,3 +401,209 @@ value = escape_py_literal(string) self.assertThat(ascii_value.decode, MockCalledOnceWith('ascii')) self.assertEqual(value, output) + + +class TestCreateNode(PservTestCase): + + run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) + + def prepare_region_rpc(self): + fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) + protocol, connecting = fixture.makeEventLoop(region.CreateNode) + return protocol, connecting + + @defer.inlineCallbacks + def test_calls_create_node_rpc(self): + protocol, connecting = self.prepare_region_rpc() + self.addCleanup((yield connecting)) + protocol.CreateNode.return_value = defer.succeed( + {"system_id": factory.make_name("system-id")}) + + uuid = 'node-' + factory.make_UUID() + macs = sorted(factory.make_mac_address() for _ in range(3)) + arch = factory.make_name('architecture') + power_type = factory.make_name('power_type') + 
power_parameters = { + 'power_address': factory.make_ipv4_address(), + 'power_user': factory.make_name('power_user'), + 'power_pass': factory.make_name('power_pass'), + 'power_control': None, + 'system_id': uuid + } + get_cluster_uuid = self.patch( + provisioningserver.utils, 'get_cluster_uuid') + get_cluster_uuid.return_value = 'cluster-' + factory.make_UUID() + yield create_node( + macs, arch, power_type, power_parameters) + self.assertThat( + protocol.CreateNode, MockCalledOnceWith( + protocol, cluster_uuid=get_cluster_uuid.return_value, + architecture=arch, power_type=power_type, + power_parameters=json.dumps(power_parameters), + mac_addresses=macs)) + + @defer.inlineCallbacks + def test_returns_system_id_of_new_node(self): + protocol, connecting = self.prepare_region_rpc() + self.addCleanup((yield connecting)) + system_id = factory.make_name("system-id") + protocol.CreateNode.return_value = defer.succeed( + {"system_id": system_id}) + get_cluster_uuid = self.patch( + provisioningserver.utils, 'get_cluster_uuid') + get_cluster_uuid.return_value = 'cluster-' + factory.make_UUID() + + uuid = 'node-' + factory.make_UUID() + macs = sorted(factory.make_mac_address() for _ in range(3)) + arch = factory.make_name('architecture') + power_type = factory.make_name('power_type') + power_parameters = { + 'power_address': factory.make_ipv4_address(), + 'power_user': factory.make_name('power_user'), + 'power_pass': factory.make_name('power_pass'), + 'power_control': None, + 'system_id': uuid + } + new_system_id = yield create_node( + macs, arch, power_type, power_parameters) + self.assertEqual(system_id, new_system_id) + + @defer.inlineCallbacks + def test_passes_on_no_duplicate_macs(self): + protocol, connecting = self.prepare_region_rpc() + self.addCleanup((yield connecting)) + system_id = factory.make_name("system-id") + protocol.CreateNode.return_value = defer.succeed( + {"system_id": system_id}) + get_cluster_uuid = self.patch( + provisioningserver.utils, 
'get_cluster_uuid') + get_cluster_uuid.return_value = 'cluster-' + factory.make_UUID() + + uuid = 'node-' + factory.make_UUID() + arch = factory.make_name('architecture') + power_type = factory.make_name('power_type') + power_parameters = { + 'power_address': factory.make_ipv4_address(), + 'power_user': factory.make_name('power_user'), + 'power_pass': factory.make_name('power_pass'), + 'power_control': None, + 'system_id': uuid + } + + # Create a list of MACs with one random duplicate. + macs = sorted(factory.make_mac_address() for _ in range(3)) + macs_with_duplicate = macs + [choice(macs)] + + yield create_node( + macs_with_duplicate, arch, power_type, power_parameters) + self.assertThat( + protocol.CreateNode, MockCalledOnceWith( + protocol, cluster_uuid=get_cluster_uuid.return_value, + architecture=arch, power_type=power_type, + power_parameters=json.dumps(power_parameters), + mac_addresses=macs)) + + @defer.inlineCallbacks + def test_logs_error_on_duplicate_macs(self): + protocol, connecting = self.prepare_region_rpc() + self.addCleanup((yield connecting)) + system_id = factory.make_name("system-id") + maaslog = self.patch(provisioningserver.utils, 'maaslog') + get_cluster_uuid = self.patch( + provisioningserver.utils, 'get_cluster_uuid') + get_cluster_uuid.return_value = 'cluster-' + factory.make_UUID() + + uuid = 'node-' + factory.make_UUID() + macs = sorted(factory.make_mac_address() for _ in range(3)) + arch = factory.make_name('architecture') + power_type = factory.make_name('power_type') + power_parameters = { + 'power_address': factory.make_ipv4_address(), + 'power_user': factory.make_name('power_user'), + 'power_pass': factory.make_name('power_pass'), + 'power_control': None, + 'system_id': uuid + } + + protocol.CreateNode.side_effect = [ + defer.succeed({"system_id": system_id}), + defer.fail(NodeAlreadyExists("Node already exists.")), + ] + + yield create_node( + macs, arch, power_type, power_parameters) + yield create_node( + macs, arch, power_type, 
class TestGetClusterConfig(MAASTestCase):
    """Tests for `get_cluster_config`, which parses shell-style
    ``NAME="value"`` configuration files into a dict.

    Each scenario supplies file `contents` and the `expected` mapping.
    """

    scenarios = [
        ('Variable with quoted value', dict(
            contents='MAAS_URL="http://site/MAAS"',
            expected={'MAAS_URL': "http://site/MAAS"})),
        ('Variable with quoted value, comment', dict(
            contents="# Ignore this\nMAAS_URL=\"http://site/MAAS\"",
            expected={'MAAS_URL': "http://site/MAAS"})),
        ('Two Variables', dict(
            contents="CLUSTER_UUID=\"uuid\"\nMAAS_URL=\"http://site/MAAS\"",
            expected={
                'MAAS_URL': "http://site/MAAS",
                'CLUSTER_UUID': "uuid",
            })),
        ('Variable with single quoted value', dict(
            contents="MAAS_URL='http://site/MAAS'",
            expected={'MAAS_URL': "http://site/MAAS"})),
        ('Variable with unquoted valued', dict(
            contents="MAAS_URL=http://site/MAAS",
            expected={'MAAS_URL': "http://site/MAAS"})),
    ]

    def test_parses_config_file(self):
        # Patch open() so get_cluster_config reads this scenario's
        # contents from an in-memory file instead of the filesystem.
        open_mock = self.patch(provisioningserver.utils, "open")
        open_mock.return_value = StringIO(self.contents)
        path = factory.make_name('path')
        result = get_cluster_config(path)
        self.assertThat(open_mock, MockCalledOnceWith(path))
        # Compare the full mapping. The previous assertItemsEqual treated
        # the dicts as iterables and therefore compared only their KEYS,
        # so a correct key with a wrong value would pass unnoticed.
        self.assertEqual(self.expected, result)
test__flattens_arbitrarily_nested_lists(self): + self.assertItemsEqual( + [1, "two", "three", 4, 5, 6], flatten( + [[1], ["two", "three"], [4], [5, 6]])) + + def test__flattens_other_iterables(self): + self.assertItemsEqual( + [1, 2, 3.3, 4, 5, 6], flatten([1, 2, {3.3, 4, (5, 6)}])) + + def test__treats_string_like_objects_as_leaves(self): + # Strings are iterable, but we know they cannot be flattened further. + self.assertItemsEqual(["abcdef"], flatten("abcdef")) + + def test__takes_star_args(self): + self.assertItemsEqual("abcdef", flatten("a", "b", "c", "d", "e", "f")) diff -Nru maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_xpath.py maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_xpath.py --- maas-1.5.4+bzr2294/src/provisioningserver/utils/tests/test_xpath.py 1970-01-01 00:00:00.000000000 +0000 +++ maas-1.7.6+bzr3376/src/provisioningserver/utils/tests/test_xpath.py 2015-07-10 01:27:14.000000000 +0000 @@ -0,0 +1,122 @@ +# Copyright 2014 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). + +"""Tests for XPath utilities.""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +str = None + +__metaclass__ = type +__all__ = [] + +import doctest +from textwrap import dedent + +from fixtures import FakeLogger +from lxml import etree +from maastesting.matchers import MockCalledOnceWith +from maastesting.testcase import MAASTestCase +from mock import Mock +from provisioningserver.utils.xpath import try_match_xpath +from testscenarios import multiply_scenarios +from testtools.matchers import DocTestMatches + + +class TestTryMatchXPathScenarios(MAASTestCase): + + doctest_flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE + + def scenario(name, xpath, doc, expected_result, expected_log=""): + """Return a scenario (for `testscenarios`) to test `try_match_xpath`. 
+ + This is a convenience function to reduce the amount of + boilerplate when constructing `scenarios_inputs` later on. + + The scenario it constructs defines an XML document, and XPath + expression, the expectation as to whether it will match or + not, and the expected log output. + """ + doc = etree.fromstring(doc).getroottree() + return name, dict( + xpath=xpath, doc=doc, expected_result=expected_result, + expected_log=dedent(expected_log)) + + # Exercise try_match_xpath with a variety of different inputs. + scenarios_inputs = ( + scenario( + "expression matches", + "/foo", "", True), + scenario( + "expression does not match", + "/foo", "", False), + scenario( + "text expression matches", + "/foo/text()", 'bar', True), + scenario( + "text expression does not match", + "/foo/text()", '', False), + scenario( + "string expression matches", + "string()", 'bar', True), + scenario( + "string expression does not match", + "string()", '', False), + scenario( + "unrecognised namespace", + "/foo:bar", '', False, + expected_log="""\ + Invalid expression '/foo:bar': Undefined namespace prefix + """), + ) + + # Exercise try_match_xpath with and without compiled XPath + # expressions. + scenarios_xpath_compiler = ( + ("xpath-compiler=XPath", dict(xpath_compile=etree.XPath)), + ("xpath-compiler=None", dict(xpath_compile=lambda expr: expr)), + ) + + # Exercise try_match_xpath with and without documents wrapped in + # an XPathDocumentEvaluator. 
class TestTryMatchXPath(MAASTestCase):
    """Tests for `try_match_xpath` behaviour not covered by the scenarios."""

    def test_logs_to_specified_logger(self):
        # An expression with an undeclared namespace prefix cannot be
        # evaluated. try_match_xpath must report it via the logger the
        # caller passes in, and nothing must leak to the root logger.
        # NOTE(review): the XML document literal below appears to have
        # been stripped by extraction (it is empty) -- confirm against
        # the original source before relying on it.
        xpath = etree.XPath("/foo:bar")
        doc = etree.XML("")
        root_logger = self.useFixture(FakeLogger())
        callers_logger = Mock()
        try_match_xpath(xpath, doc, callers_logger)
        # The root logger saw nothing.
        self.assertEqual("", root_logger.output)
        # The caller's logger received the warning, with the plaintext
        # expression and lxml's error message as lazy %-style arguments.
        self.assertThat(
            callers_logger.warning,
            MockCalledOnceWith(
                u"Invalid expression '%s': %s",
                u'/foo:bar', u'Undefined namespace prefix'))
def normalise_whitespace(text):
    """Collapse each whitespace run in `text` to a single space.

    Leading and trailing whitespace is removed entirely.
    """
    words = text.split()
    return ' '.join(words)


def make_bullet_list(messages):
    """Join `messages` into a bullet list.

    Each message is reformatted to 70 columns wide, indented by 2 columns,
    making 72 columns in all. The first line of each message is denoted by
    an asterisk in the first column.

    :type messages: An iterable of strings.
    :return: A string.
    """
    wrapper = TextWrapper(
        width=72, initial_indent="* ", subsequent_indent="  ")
    bullets = [wrapper.fill(message) for message in messages]
    return "\n".join(bullets)
def asynchronous(func=undefined, timeout=undefined):
    """Decorates a function to ensure that it always runs in the reactor.

    If the wrapper is called from the reactor thread, it will call straight
    through to the wrapped function. It will not be wrapped by `maybeDeferred`
    for example.

    If the wrapper is called from another thread, it will return a
    :py:class:`crochet.EventualResult`, as if it had been decorated with
    `crochet.run_in_reactor`.

    There's an additional convenience. If `timeout` has been specified, the
    :py:class:`~crochet.EventualResult` will be waited on for up to `timeout`
    seconds. This means that callers don't need to remember to wait. If
    `timeout` is `FOREVER` then it will wait indefinitely, which can be useful
    where the function itself handles time-outs, or where the called function
    doesn't actually defer work but just needs to run in the reactor thread.

    This also serves a secondary documentation purpose; functions decorated
    with this are readily identifiable as asynchronous.

    :raises ValueError: If `timeout` is negative, or is neither a number
        nor `FOREVER`.
    """
    # Support both @asynchronous and @asynchronous(timeout=...): when used
    # with arguments, `func` is not supplied on the first call, so return a
    # partially-applied decorator.
    if func is undefined:
        return partial(asynchronous, timeout=timeout)

    # Validate `timeout` once, at decoration time, not on every call.
    if timeout is not undefined:
        if isinstance(timeout, (int, long, float)):
            if timeout < 0:
                raise ValueError(
                    "timeout must be >= 0, not %d"
                    % timeout)
        elif timeout is not FOREVER:
            # Fixed message: previously read "must an int, float, or
            # undefined" -- missing "be", and the other accepted value
            # here is FOREVER, not "undefined".
            raise ValueError(
                "timeout must be an int, float, or FOREVER, not %r"
                % (timeout,))

    func_in_reactor = run_in_reactor(func)

    @wraps(func)
    def wrapper(*args, **kwargs):
        if isInIOThread():
            # Already in the reactor thread; call straight through.
            return func(*args, **kwargs)
        elif timeout is undefined:
            # No timeout requested: hand back the EventualResult.
            return func_in_reactor(*args, **kwargs)
        elif timeout is FOREVER:
            # Wait indefinitely for the result.
            return func_in_reactor(*args, **kwargs).wait()
        else:
            # Wait up to `timeout` seconds for the result.
            return func_in_reactor(*args, **kwargs).wait(timeout)

    return wrapper
@contextmanager
def reactor_sync():
    """Context manager that synchronises with the reactor thread.

    When holding this context the reactor thread is suspended, and the current
    thread is marked as the IO thread. You can then do almost any work that
    you would normally do in the reactor thread.

    The "almost" above refers to things that track state by thread, which with
    Twisted is not much. However, things like :py:mod:`twisted.python.context`
    may not behave quite as you expect.
    """
    # If we're already running in the reactor thread this is a no-op; we're
    # already synchronised with the execution of the reactor.
    if isInIOThread():
        yield
        return

    # If we're not running in the reactor thread, we need to synchronise
    # execution, being careful to avoid deadlocks.
    sync = threading.Condition()

    # When calling sync.wait() we specify a timeout of sys.maxint. The default
    # timeout of None cannot be interrupted by SIGINT, aka Ctrl-C, which can
    # be more than a little frustrating.

    def sync_io():
        # This runs in the reactor's thread. It first gets a lock on `sync`.
        with sync:
            # This then notifies a single waiter. That waiter will be the
            # thread that this context-manager was invoked from.
            sync.notify()
            # This then waits to be notified back. During this time the
            # reactor cannot run.
            sync.wait(sys.maxint)

    # Grab a lock on the `sync` condition.
    with sync:
        # Schedule `sync_io` to be called in the reactor. We do this with the
        # lock held so that `sync_io` cannot progress quite yet.
        reactor.callFromThread(sync_io)
        # Now, wait. This allows `sync_io` to obtain the lock on `sync`, and
        # then awaken me via `notify()`. When `wait()` returns we once again
        # have a lock on `sync`. We're able to get this lock because `sync_io`
        # goes into `sync.wait()`, thus releasing its lock on it.
        sync.wait(sys.maxint)
        # Record the reactor's thread. This is safe to do now that we're
        # synchronised with the reactor.
        reactorThread = threadable.ioThread
        try:
            # Mark the current thread as the IO thread. This makes the
            # `asynchronous` and `synchronous` decorators DTRT.
            threadable.ioThread = threadable.getThreadID()
            # Allow this thread to execute while holding `sync`. The reactor
            # is prevented from spinning because `sync_io` is in `wait()`.
            yield
        finally:
            # Restore the IO thread.
            threadable.ioThread = reactorThread
            # Wake up `sync_io`, which can then run to completion, though not
            # until we release our lock on `sync` by exiting this context.
            sync.notify()
def callOut(func, *args, **kwargs):
    """Wrap a function call so it can be used as a transparent callback.

    For example::

        d = client.fetchSomethingReallyImportant()
        d.addCallback(callOut(updateStats))
        d.addCallback(doSomethingWithReallyImportantThing)

    Use this where you need a side-effect as a :py:class:`~Deferred` is fired,
    but you don't want to clobber the result being propagated with the return
    value from the call to the given function.

    Note that the result being passed through is *not* passed to the
    function.

    Note also that if the call-out raises an exception, this will be
    propagated; nothing is done to suppress the exception or preserve the
    result in this case.
    """
    def passThrough(result):
        # Invoke the side-effect; its return value is deliberately ignored.
        func(*args, **kwargs)
        # Hand the original result on, untouched.
        return result
    return passThrough
    def set(self, value):
        """Set the promised value.

        Notifies all waiters of the value, or raises `AlreadyCalledError` if
        the value has been set previously.

        :param value: The value to deliver to every waiting `Deferred`.
        :raises AlreadyCalledError: If a value has already been set.
        """
        # `waiters` is replaced by None once a value has been set; that is
        # the marker for "already called".
        if self.waiters is None:
            raise AlreadyCalledError(
                "Value already set to %r." % (self.value,))

        # Record the value first, then detach the waiter set before firing,
        # so this object is in its final state when callbacks run.
        self.value = value
        waiters, self.waiters = self.waiters, None
        # Fire a copy of the set: a waiter's callback chain may re-enter
        # this object and mutate the set while we iterate.
        for waiter in waiters.copy():
            waiter.callback(value)
def compose_URL(base_url, host):
    """Produce a URL on a given hostname or IP address.

    This is straightforward if the IP address is a hostname or an IPv4
    address; but if it's an IPv6 address, the URL must contain the IP address
    in square brackets as per RFC 3986.

    :param base_url: URL without the host part, e.g. `http:///path`.
    :param host: Host name or IP address to insert in the host part of the
        URL.
    :return: A URL string with the host part taken from `host`, and all
        others from `base_url`.
    """
    looks_like_ipv6 = (
        host.count(':') > 0 and
        re.match('[:.0-9a-fA-F]+(?:%.+)?$', host) is not None)
    if looks_like_ipv6:
        # Bare IPv6 address: wrap it in square brackets. Escape any zone
        # index (introduced by a % sign) but leave the colons alone.
        netloc_host = '[%s]' % urllib.quote(host, safe=':')
    else:
        # IPv4 address, hostname, or IPv6 already in brackets: keep as-is.
        netloc_host = host
    parsed = urlparse(base_url)
    # Preserve any explicit port carried by the base URL.
    netloc = netloc_host if parsed.port is None else (
        '%s:%d' % (netloc_host, parsed.port))
    return urlunparse(parsed._replace(netloc=netloc))
def try_match_xpath(xpath, doc, logger=logging):
    """See if the XPath expression matches the given XML document.

    Invalid XPath expressions are logged, and are returned as a
    non-match.

    :type xpath: Either `unicode` or `etree.XPath`
    :type doc: Either `etree._ElementTree` or `etree.XPathDocumentEvaluator`

    :rtype: bool
    """
    try:
        # Evaluating an XPath expression with LXML can yield a list, a
        # string, or other types depending on the expression. Casting the
        # result into a boolean context is the most reliable way of
        # detecting a match.
        result = match_xpath(xpath, doc)
    except etree.XPathEvalError as error:
        # Recover the plaintext form of the expression for the log, then
        # report the failure as a non-match.
        expr = xpath.path if is_compiled_xpath(xpath) else xpath
        logger.warning("Invalid expression '%s': %s", expr, unicode(error))
        return False
    else:
        return bool(result)
+ +# Exit immediately if a command exits with a non-zero status. +set -o errexit +# Treat unset variables as an error when substituting. +set -o nounset + + +# Look for use of maaslog.exception in the source tree. Not tests, since +# they may also look for this mistake. +find_incorrect_usage() { + # Find all Python files in src, except tests, + # which have "maaslog.exception(" on a line that isn't a comment. + find $1/src -name \*.py ! -path '*/tests/*' -print0 | + xargs -r0 grep -n '^[^#]*\&2 <